diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index 75e1df4261..770b000fca 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -44,7 +44,6 @@ try:
     import ccmlib
-    from ccmlib.dse_cluster import DseCluster
     from ccmlib.cluster import Cluster as CCMCluster
     from ccmlib.scylla_cluster import ScyllaCluster as CCMScyllaCluster
     from ccmlib.cluster_factory import ClusterFactory as CCMClusterFactory
@@ -122,98 +121,32 @@ def cmd_line_args_to_dict(env_var):
             args[cmd_arg.lstrip('-')] = cmd_arg_value
     return args
 
-
-def _get_cass_version_from_dse(dse_version):
-    if dse_version.startswith('4.6') or dse_version.startswith('4.5'):
-        raise Exception("Cassandra Version 2.0 not supported anymore")
-    elif dse_version.startswith('4.7') or dse_version.startswith('4.8'):
-        cass_ver = "2.1"
-    elif dse_version.startswith('5.0'):
-        cass_ver = "3.0"
-    elif dse_version.startswith('5.1'):
-        # TODO: refactor this method to use packaging.Version everywhere
-        if Version(dse_version) >= Version('5.1.2'):
-            cass_ver = "3.11"
-        else:
-            cass_ver = "3.10"
-    elif dse_version.startswith('6.0'):
-        if dse_version == '6.0.0':
-            cass_ver = '4.0.0.2284'
-        elif dse_version == '6.0.1':
-            cass_ver = '4.0.0.2349'
-        else:
-            cass_ver = '4.0.0.' + ''.join(dse_version.split('.'))
-    elif Version(dse_version) >= Version('6.7'):
-        if dse_version == '6.7.0':
-            cass_ver = "4.0.0.67"
-        else:
-            cass_ver = '4.0.0.' + ''.join(dse_version.split('.'))
-    elif dse_version.startswith('6.8'):
-        if dse_version == '6.8.0':
-            cass_ver = "4.0.0.68"
-        else:
-            cass_ver = '4.0.0.' + ''.join(dse_version.split('.'))
-    else:
-        log.error("Unknown dse version found {0}, defaulting to 2.1".format(dse_version))
-        cass_ver = "2.1"
-    return Version(cass_ver)
-
-
-def _get_dse_version_from_cass(cass_version):
-    if cass_version.startswith('2.1'):
-        dse_ver = "4.8.15"
-    elif cass_version.startswith('3.0'):
-        dse_ver = "5.0.12"
-    elif cass_version.startswith('3.10') or cass_version.startswith('3.11'):
-        dse_ver = "5.1.7"
-    elif cass_version.startswith('4.0'):
-        dse_ver = "6.0"
-    else:
-        log.error("Unknown cassandra version found {0}, defaulting to 2.1".format(cass_version))
-        dse_ver = "2.1"
-    return dse_ver
-
 USE_CASS_EXTERNAL = bool(os.getenv('USE_CASS_EXTERNAL', False))
 KEEP_TEST_CLUSTER = bool(os.getenv('KEEP_TEST_CLUSTER', False))
 SIMULACRON_JAR = os.getenv('SIMULACRON_JAR', None)
-CLOUD_PROXY_PATH = os.getenv('CLOUD_PROXY_PATH', None)
-# Supported Clusters: Cassandra, DDAC, DSE, Scylla
-DSE_VERSION = None
+# Supported Clusters: Cassandra, Scylla
 SCYLLA_VERSION = os.getenv('SCYLLA_VERSION', None)
-if os.getenv('DSE_VERSION', None): # we are testing against DSE
-    DSE_VERSION = Version(os.getenv('DSE_VERSION', None))
-    DSE_CRED = os.getenv('DSE_CREDS', None)
-    CASSANDRA_VERSION = _get_cass_version_from_dse(DSE_VERSION.base_version)
-    CCM_VERSION = DSE_VERSION.base_version
-else: # we are testing against Cassandra,DDAC or Scylla
-    if SCYLLA_VERSION:
-        cv_string = SCYLLA_VERSION
-        mcv_string = os.getenv('MAPPED_SCYLLA_VERSION', '3.11.4') # Assume that scylla matches cassandra `3.11.4` behavior
-    else:
-        cv_string = os.getenv('CASSANDRA_VERSION', None)
-        mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None)
-    try:
-        cassandra_version = Version(cv_string) # env var is set to test-dse for DDAC
-    except:
-        # fallback to MAPPED_CASSANDRA_VERSION
-        cassandra_version = Version(mcv_string)
+if SCYLLA_VERSION:
+    cv_string = SCYLLA_VERSION
+    mcv_string = os.getenv('MAPPED_SCYLLA_VERSION', '3.11.4') # Assume that scylla matches cassandra `3.11.4` behavior
+else:
+    cv_string = os.getenv('CASSANDRA_VERSION', None)
+    mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None)
+try:
+    cassandra_version = Version(cv_string) # env var is set to test-dse for DDAC
+except:
+    # fallback to MAPPED_CASSANDRA_VERSION
+    cassandra_version = Version(mcv_string)
 
-    CASSANDRA_VERSION = Version(mcv_string) if mcv_string else cassandra_version
-    CCM_VERSION = mcv_string if mcv_string else cv_string
+CASSANDRA_VERSION = Version(mcv_string) if mcv_string else cassandra_version
+CCM_VERSION = mcv_string if mcv_string else cv_string
 
 CASSANDRA_IP = os.getenv('CLUSTER_IP', '127.0.0.1')
 CASSANDRA_DIR = os.getenv('CASSANDRA_DIR', None)
 
 CCM_KWARGS = {}
-if DSE_VERSION:
-    log.info('Using DSE version: %s', DSE_VERSION)
-    if not CASSANDRA_DIR:
-        CCM_KWARGS['version'] = DSE_VERSION
-    if DSE_CRED:
-        log.info("Using DSE credentials file located at {0}".format(DSE_CRED))
-        CCM_KWARGS['dse_credentials_file'] = DSE_CRED
-elif CASSANDRA_DIR:
+if CASSANDRA_DIR:
     log.info("Using Cassandra dir: %s", CASSANDRA_DIR)
     CCM_KWARGS['install_dir'] = CASSANDRA_DIR
 elif os.getenv('SCYLLA_VERSION'):
@@ -228,15 +161,9 @@ def get_default_protocol():
     if CASSANDRA_VERSION >= Version('4.0-a'):
-        if DSE_VERSION:
-            return ProtocolVersion.DSE_V2
-        else:
-            return ProtocolVersion.V5
+        return ProtocolVersion.V5
     if CASSANDRA_VERSION >= Version('3.10'):
-        if DSE_VERSION:
-            return ProtocolVersion.DSE_V1
-        else:
-            return 4
+        return 4
     if CASSANDRA_VERSION >= Version('2.2'):
         return 4
     elif CASSANDRA_VERSION >= Version('2.1'):
@@ -262,23 +189,14 @@ def get_supported_protocol_versions():
         2.2 -> 4, 3
         3.X -> 4, 3
         3.10(C*) -> 5(beta),4,3
-        3.10(DSE) -> DSE_V1,4,3
         4.0(C*) -> 6(beta),5,4,3
-        4.0(DSE) -> DSE_v2, DSE_V1,4,3
     `
     """
     if CASSANDRA_VERSION >= Version('4.0-beta5'):
-        if not DSE_VERSION:
-            return (3, 4, 5, 6)
+        return (3, 4, 5, 6)
     if CASSANDRA_VERSION >= Version('4.0-a'):
-        if DSE_VERSION:
-            return (3, 4, ProtocolVersion.DSE_V1, ProtocolVersion.DSE_V2)
-        else:
-            return (3, 4, 5)
+        return (3, 4, 5)
     elif CASSANDRA_VERSION >= Version('3.10'):
-        if DSE_VERSION:
-            return (3, 4, ProtocolVersion.DSE_V1)
-        else:
-            return (3, 4)
+        return (3, 4)
     elif CASSANDRA_VERSION >= Version('3.0'):
         return (3, 4)
     elif CASSANDRA_VERSION >= Version('2.2'):
@@ -311,15 +229,9 @@ def get_unsupported_upper_protocol():
         return 5
 
     if CASSANDRA_VERSION >= Version('4.0-a'):
-        if DSE_VERSION:
-            return None
-        else:
-            return ProtocolVersion.DSE_V1
+        return ProtocolVersion.DSE_V1
     if CASSANDRA_VERSION >= Version('3.10'):
-        if DSE_VERSION:
-            return ProtocolVersion.DSE_V2
-        else:
-            return 5
+        return 5
     if CASSANDRA_VERSION >= Version('2.2'):
         return 5
     elif CASSANDRA_VERSION >= Version('2.1'):
@@ -364,14 +276,6 @@ def _id_and_mark(f):
 lessthancass40 = unittest.skipUnless(CASSANDRA_VERSION < Version('4.0'), 'Cassandra version less than 4.0 required')
 lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < Version('3.0'), 'Cassandra version less then 3.0 required')
 
-greaterthanorequaldse68 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.8'), "DSE 6.8 or greater required for this test")
-greaterthanorequaldse67 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.7'), "DSE 6.7 or greater required for this test")
-greaterthanorequaldse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.0'), "DSE 6.0 or greater required for this test")
-greaterthanorequaldse51 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('5.1'), "DSE 5.1 or greater required for this test")
greater required for this test") -greaterthanorequaldse50 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('5.0'), "DSE 5.0 or greater required for this test") -lessthandse51 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('5.1'), "DSE version less than 5.1 required") -lessthandse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('6.0'), "DSE version less than 6.0 required") - # pytest.mark.xfail instead of unittest.expectedFailure because # 1. unittest doesn't skip setUpClass when used on class and we need it sometimes # 2. unittest doesn't have conditional xfail, and I prefer to use pytest than custom decorator @@ -393,10 +297,6 @@ def _id_and_mark(f): requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER, "This test is not suitible for environments with large clock granularity") requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"), "Simulacron jar hasn't been specified or C* version is 2.0") -requirecassandra = unittest.skipIf(DSE_VERSION, "Cassandra required") -notdse = unittest.skipIf(DSE_VERSION, "DSE not supported") -requiredse = unittest.skipUnless(DSE_VERSION, "DSE required") -requirescloudproxy = unittest.skipIf(CLOUD_PROXY_PATH is None, "Cloud Proxy path hasn't been specified") libevtest = unittest.skipUnless(EVENT_LOOP_MANAGER=="libev", "Test timing designed for libev loop") @@ -508,15 +408,11 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, configuration_options = configuration_options or {} dse_options = dse_options or {} workloads = workloads or [] - dse_cluster = True if DSE_VERSION else False - if ccm_options is None and DSE_VERSION: - ccm_options = {"version": CCM_VERSION} - elif ccm_options is None: + if ccm_options is None: ccm_options = CCM_KWARGS.copy() cassandra_version = ccm_options.get('version', CCM_VERSION) - dse_version = ccm_options.get('version', DSE_VERSION) global CCM_CLUSTER if USE_CASS_EXTERNAL: @@ -562,88 +458,41 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, if os.path.exists(cluster_path): shutil.rmtree(cluster_path) - if dse_cluster: - CCM_CLUSTER = DseCluster(path, cluster_name, **ccm_options) + if SCYLLA_VERSION: + # `experimental: True` enable all experimental features. + # CDC is causing an issue (can't start cluster with multiple seeds) + # Selecting only features we need for tests, i.e. anything but CDC. 
+            CCM_CLUSTER = CCMScyllaCluster(path, cluster_name, **ccm_options)
+            CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf'], 'start_native_transport': True})
+
+            CCM_CLUSTER.set_configuration_options({'skip_wait_for_gossip_to_settle': 0})
+            # Permit IS NOT NULL restriction on non-primary key columns of a materialized view
+            # This allows `test_metadata_with_quoted_identifiers` to run
+            CCM_CLUSTER.set_configuration_options({'strict_is_not_null_in_views': False})
+        else:
+            ccm_cluster_clz = CCMCluster if Version(cassandra_version) < Version(
+                '4.1') else Cassandra41CCMCluster
+            CCM_CLUSTER = ccm_cluster_clz(path, cluster_name, **ccm_options)
             CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
-            CCM_CLUSTER.set_configuration_options({'batch_size_warn_threshold_in_kb': 5})
-            if Version(dse_version) >= Version('5.0'):
-                CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
-                CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
-            if Version(dse_version) >= Version('5.1'):
-                # For Inet4Address
-                CCM_CLUSTER.set_dse_configuration_options({
-                    'graph': {
-                        'gremlin_server': {
-                            'scriptEngines': {
-                                'gremlin-groovy': {
-                                    'config': {
-                                        'sandbox_rules': {
-                                            'whitelist_packages': ['java.net']
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                })
-            if 'spark' in workloads:
-                if Version(dse_version) >= Version('6.8'):
-                    config_options = {
-                        "resource_manager_options": {
-                            "worker_options": {
-                                "cores_total": 0.1,
-                                "memory_total": "64M"
-                            }
-                        }
-                    }
+            if Version(cassandra_version) >= Version('2.2'):
+                CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
+            if Version(cassandra_version) >= Version('3.0'):
+                # The config.yml option below is deprecated in C* 4.0 per CASSANDRA-17280
+                if Version(cassandra_version) < Version('4.0'):
+                    CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
                 else:
-                    config_options = {"initial_spark_worker_resources": 0.1}
-
-                if Version(dse_version) >= Version('6.7'):
-                    log.debug("Disabling AlwaysON SQL for a DSE 6.7 Cluster")
-                    config_options['alwayson_sql_options'] = {'enabled': False}
-                CCM_CLUSTER.set_dse_configuration_options(config_options)
-            common.switch_cluster(path, cluster_name)
-            CCM_CLUSTER.set_configuration_options(configuration_options)
-            CCM_CLUSTER.populate(nodes, ipformat=ipformat)
-
-            CCM_CLUSTER.set_dse_configuration_options(dse_options)
-        else:
-            if SCYLLA_VERSION:
-                # `experimental: True` enable all experimental features.
-                # CDC is causing an issue (can't start cluster with multiple seeds)
-                # Selecting only features we need for tests, i.e. anything but CDC.
-                CCM_CLUSTER = CCMScyllaCluster(path, cluster_name, **ccm_options)
-                CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf'], 'start_native_transport': True})
-
-                CCM_CLUSTER.set_configuration_options({'skip_wait_for_gossip_to_settle': 0})
-                # Permit IS NOT NULL restriction on non-primary key columns of a materialized view
-                # This allows `test_metadata_with_quoted_identifiers` to run
-                CCM_CLUSTER.set_configuration_options({'strict_is_not_null_in_views': False})
-            else:
-                ccm_cluster_clz = CCMCluster if Version(cassandra_version) < Version(
-                    '4.1') else Cassandra41CCMCluster
-                CCM_CLUSTER = ccm_cluster_clz(path, cluster_name, **ccm_options)
-                CCM_CLUSTER.set_configuration_options({'start_native_transport': True})
-                if Version(cassandra_version) >= Version('2.2'):
-                    CCM_CLUSTER.set_configuration_options({'enable_user_defined_functions': True})
-                if Version(cassandra_version) >= Version('3.0'):
-                    # The config.yml option below is deprecated in C* 4.0 per CASSANDRA-17280
-                    if Version(cassandra_version) < Version('4.0'):
-                        CCM_CLUSTER.set_configuration_options({'enable_scripted_user_defined_functions': True})
-                    else:
-                        # Cassandra version >= 4.0
-                        CCM_CLUSTER.set_configuration_options({
-                            'enable_materialized_views': True,
-                            'enable_sasi_indexes': True,
-                            'enable_transient_replication': True,
-                        })
-
-            common.switch_cluster(path, cluster_name)
-            CCM_CLUSTER.set_configuration_options(configuration_options)
-            # Since scylla CCM doesn't yet support this options, we skip it
-            # , use_single_interface=use_single_interface)
-            CCM_CLUSTER.populate(nodes, ipformat=ipformat)
+                    # Cassandra version >= 4.0
+                    CCM_CLUSTER.set_configuration_options({
+                        'enable_materialized_views': True,
+                        'enable_sasi_indexes': True,
+                        'enable_transient_replication': True,
+                    })
+
+        common.switch_cluster(path, cluster_name)
+        CCM_CLUSTER.set_configuration_options(configuration_options)
+        # Since Scylla CCM doesn't yet support these options, we skip them
+        # , use_single_interface=use_single_interface)
+        CCM_CLUSTER.populate(nodes, ipformat=ipformat)
 
     try:
         jvm_args = []
@@ -1145,4 +994,4 @@ def _get_config_val(self, k, v):
 
     def set_configuration_options(self, values=None, *args, **kwargs):
         new_values = {self._get_config_key(k, str(v)):self._get_config_val(k, str(v)) for (k,v) in values.items()}
-        super(Cassandra41CCMCluster, self).set_configuration_options(values=new_values, *args, **kwargs)
\ No newline at end of file
+        super(Cassandra41CCMCluster, self).set_configuration_options(values=new_values, *args, **kwargs)
diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py
index dffaccd190..2c9ca172f8 100644
--- a/tests/integration/advanced/__init__.py
+++ b/tests/integration/advanced/__init__.py
@@ -11,152 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-import unittest
-
-from urllib.request import build_opener, Request, HTTPHandler
-import re
-import os
-import time
-from os.path import expanduser
-
-from ccmlib import common
-
-from tests.integration import get_server_versions, BasicKeyspaceUnitTestCase, \
-    drop_keyspace_shutdown_cluster, get_node, USE_CASS_EXTERNAL, TestCluster
-from tests.integration import use_singledc, use_single_node, wait_for_node_socket, CASSANDRA_IP
-
-home = expanduser('~')
-
-# Home directory of the Embedded Apache Directory Server to use
-ADS_HOME = os.getenv('ADS_HOME', home)
-
-
-def find_spark_master(session):
-
-    # Iterate over the nodes the one with port 7080 open is the spark master
-    for host in session.hosts:
-        ip = host.address
-        port = 7077
-        spark_master = (ip, port)
-        if common.check_socket_listening(spark_master, timeout=3):
-            return spark_master[0]
-    return None
-
-
-def wait_for_spark_workers(num_of_expected_workers, timeout):
-    """
-    This queries the spark master and checks for the expected number of workers
-    """
-    start_time = time.time()
-    while True:
-        opener = build_opener(HTTPHandler)
-        request = Request("http://{0}:7080".format(CASSANDRA_IP))
-        request.get_method = lambda: 'GET'
-        connection = opener.open(request)
-        match = re.search('Alive Workers:.*(\d+)', connection.read().decode('utf-8'))
-        num_workers = int(match.group(1))
-        if num_workers == num_of_expected_workers:
-            match = True
-            break
-        elif time.time() - start_time > timeout:
-            match = True
-            break
-        time.sleep(1)
-    return match
-
-
-def use_single_node_with_graph(start=True, options={}, dse_options={}):
-    use_single_node(start=start, workloads=['graph'], configuration_options=options, dse_options=dse_options)
-
-
-def use_single_node_with_graph_and_spark(start=True, options={}):
-    use_single_node(start=start, workloads=['graph', 'spark'], configuration_options=options)
-
-
-def use_single_node_with_graph_and_solr(start=True, options={}):
-    use_single_node(start=start, workloads=['graph', 'solr'], configuration_options=options)
-
-
-def use_singledc_wth_graph(start=True):
-    use_singledc(start=start, workloads=['graph'])
-
-
-def use_singledc_wth_graph_and_spark(start=True):
-    use_cluster_with_graph(3)
-
-
-def use_cluster_with_graph(num_nodes):
-    """
-    This is a work around to account for the fact that spark nodes will conflict over master assignment
-    when started all at once.
-    """
-    if USE_CASS_EXTERNAL:
-        return
-
-    # Create the cluster but don't start it.
-    use_singledc(start=False, workloads=['graph', 'spark'])
-    # Start first node.
-    get_node(1).start(wait_for_binary_proto=True)
-    # Wait binary protocol port to open
-    wait_for_node_socket(get_node(1), 120)
-    # Wait for spark master to start up
-    spark_master_http = ("localhost", 7080)
-    common.check_socket_listening(spark_master_http, timeout=60)
-    tmp_cluster = TestCluster()
-
-    # Start up remaining nodes.
-    try:
-        session = tmp_cluster.connect()
-        statement = "ALTER KEYSPACE dse_leases WITH REPLICATION = {'class': 'NetworkTopologyStrategy', 'dc1': '%d'}" % (num_nodes)
-        session.execute(statement)
-    finally:
-        tmp_cluster.shutdown()
-
-    for i in range(1, num_nodes+1):
-        if i is not 1:
-            node = get_node(i)
-            node.start(wait_for_binary_proto=True)
-            wait_for_node_socket(node, 120)
-
-    # Wait for workers to show up as Alive on master
-    wait_for_spark_workers(3, 120)
-
-
-class BasicGeometricUnitTestCase(BasicKeyspaceUnitTestCase):
-    """
-    This base test class is used by all the geomteric tests. It contains class level teardown and setup
-    methods. It also contains the test fixtures used by those tests
-    """
-
-    @classmethod
-    def common_dse_setup(cls, rf, keyspace_creation=True):
-        cls.cluster = TestCluster()
-        cls.session = cls.cluster.connect()
-        cls.ks_name = cls.__name__.lower()
-        if keyspace_creation:
-            cls.create_keyspace(rf)
-        cls.cass_version, cls.cql_version = get_server_versions()
-        cls.session.set_keyspace(cls.ks_name)
-
-    @classmethod
-    def setUpClass(cls):
-        cls.common_dse_setup(1)
-        cls.initalizeTables()
-
-    @classmethod
-    def tearDownClass(cls):
-        drop_keyspace_shutdown_cluster(cls.ks_name, cls.session, cls.cluster)
-
-    @classmethod
-    def initalizeTables(cls):
-        udt_type = "CREATE TYPE udt1 (g {0})".format(cls.cql_type_name)
-        large_table = "CREATE TABLE tbl (k uuid PRIMARY KEY, g {0}, l list<{0}>, s set<{0}>, m0 map<{0},int>, m1 map<int,{0}>, t tuple<{0},{0},{0}>, u frozen<udt1>)".format(
-            cls.cql_type_name)
-        simple_table = "CREATE TABLE tblpk (k {0} primary key, v int)".format(cls.cql_type_name)
-        cluster_table = "CREATE TABLE tblclustering (k0 int, k1 {0}, v int, primary key (k0, k1))".format(
-            cls.cql_type_name)
-        cls.session.execute(udt_type)
-        cls.session.execute(large_table)
-        cls.session.execute(simple_table)
-        cls.session.execute(cluster_table)
diff --git a/tests/integration/advanced/graph/__init__.py b/tests/integration/advanced/graph/__init__.py
deleted file mode 100644
index cc40c6906a..0000000000
--- a/tests/integration/advanced/graph/__init__.py
+++ /dev/null
@@ -1,1195 +0,0 @@
-# Copyright DataStax, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import logging
-import inspect
-from packaging.version import Version
-import ipaddress
-from uuid import UUID
-from decimal import Decimal
-import datetime
-
-from cassandra.util import Point, LineString, Polygon, Duration
-
-from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT
-from cassandra.cluster import GraphAnalyticsExecutionProfile, GraphExecutionProfile, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, \
-    default_lbp_factory
-from cassandra.policies import DSELoadBalancingPolicy
-
-from cassandra.graph import GraphSON1Deserializer
-from cassandra.graph.graphson import InetTypeIO, GraphSON2Deserializer, GraphSON3Deserializer
-from cassandra.graph import Edge, Vertex, Path
-from cassandra.graph.query import GraphOptions, GraphProtocol, graph_graphson2_row_factory, \
-    graph_graphson3_row_factory
-
-from tests.integration import DSE_VERSION
-from tests.integration.advanced import *
-
-
-def setup_module():
-    if DSE_VERSION:
-        dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}}
-        use_single_node_with_graph(dse_options=dse_options)
-
-
-log = logging.getLogger(__name__)
-
-MAX_LONG = 9223372036854775807
-MIN_LONG = -9223372036854775808
-ZERO_LONG = 0
-
-MAKE_STRICT = "schema.config().option('graph.schema_mode').set('production')"
-MAKE_NON_STRICT = "schema.config().option('graph.schema_mode').set('development')"
-ALLOW_SCANS = "schema.config().option('graph.allow_scan').set('true')"
-
-deserializer_plus_to_ipaddressv4 = lambda x: ipaddress.IPv4Address(GraphSON1Deserializer.deserialize_inet(x))
-deserializer_plus_to_ipaddressv6 = lambda x: ipaddress.IPv6Address(GraphSON1Deserializer.deserialize_inet(x))
-
-
-def generic_ip_deserializer(string_ip_address):
-    if ":" in string_ip_address:
-        return deserializer_plus_to_ipaddressv6(string_ip_address)
-    return deserializer_plus_to_ipaddressv4(string_ip_address)
-
-
-class GenericIpAddressIO(InetTypeIO):
-    @classmethod
-    def deserialize(cls, value, reader=None):
-        return generic_ip_deserializer(value)
-
-GraphSON2Deserializer._deserializers[GenericIpAddressIO.graphson_type] = GenericIpAddressIO
-GraphSON3Deserializer._deserializers[GenericIpAddressIO.graphson_type] = GenericIpAddressIO
-
-if DSE_VERSION:
-    if DSE_VERSION >= Version('6.8.0'):
-        CREATE_CLASSIC_GRAPH = "system.graph(name).engine(Classic).create()"
-    else:
-        CREATE_CLASSIC_GRAPH = "system.graph(name).create()"
-
-
-def reset_graph(session, graph_name):
-    ks = list(session.execute(
-        "SELECT * FROM system_schema.keyspaces WHERE keyspace_name = '{}';".format(graph_name)))
-    if ks:
-        try:
-            session.execute_graph('system.graph(name).drop()', {'name': graph_name},
-                                  execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)
-        except:
-            pass
-
-    session.execute_graph(CREATE_CLASSIC_GRAPH, {'name': graph_name},
-                          execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)
-    wait_for_graph_inserted(session, graph_name)
-
-
-def wait_for_graph_inserted(session, graph_name):
-    count = 0
-    exists = session.execute_graph('system.graph(name).exists()', {'name': graph_name},
-                                   execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0].value
-    while not exists and count < 50:
-        time.sleep(1)
-        exists = session.execute_graph('system.graph(name).exists()', {'name': graph_name},
-                                       execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)[0].value
-    return exists
-
-
-class BasicGraphUnitTestCase(BasicKeyspaceUnitTestCase):
-    """
-    This is basic graph unit test case that provides various utility methods that can be leveraged for testcase setup and tear
-    down
-    """
-
-    @property
-    def graph_name(self):
-        return self._testMethodName.lower()
-
-    def session_setup(self):
-        lbp = DSELoadBalancingPolicy(default_lbp_factory())
-
-        ep_graphson2 = GraphExecutionProfile(
-            request_timeout=60,
-            load_balancing_policy=lbp,
-            graph_options=GraphOptions(
-                graph_name=self.graph_name,
-                graph_protocol=GraphProtocol.GRAPHSON_2_0
-            ),
-            row_factory=graph_graphson2_row_factory)
-
-        ep_graphson3 = GraphExecutionProfile(
-            request_timeout=60,
-            load_balancing_policy=lbp,
-            graph_options=GraphOptions(
-                graph_name=self.graph_name,
-                graph_protocol=GraphProtocol.GRAPHSON_3_0
-            ),
-            row_factory=graph_graphson3_row_factory)
-
-        ep_graphson1 = GraphExecutionProfile(
-            request_timeout=60,
-            load_balancing_policy=lbp,
-            graph_options=GraphOptions(
-                graph_name=self.graph_name
-            )
-        )
-
-        ep_analytics = GraphAnalyticsExecutionProfile(
-            request_timeout=60,
-            load_balancing_policy=lbp,
-            graph_options=GraphOptions(
-                graph_source=b'a',
-                graph_language=b'gremlin-groovy',
-                graph_name=self.graph_name
-            )
-        )
-
-        self.cluster = TestCluster(execution_profiles={
-            EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1,
-            EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics,
-            "graphson1": ep_graphson1,
-            "graphson2": ep_graphson2,
-            "graphson3": ep_graphson3
-        })
-
-        self.session = self.cluster.connect()
-        self.ks_name = self._testMethodName.lower()
-        self.cass_version, self.cql_version = get_server_versions()
-
-    def setUp(self):
-        self.session_setup()
-        self.reset_graph()
-        self.clear_schema()
-        # enable dev and scan modes
-        self.session.execute_graph(MAKE_NON_STRICT)
-        self.session.execute_graph(ALLOW_SCANS)
-
-    def tearDown(self):
-        self.cluster.shutdown()
-
-    def clear_schema(self):
-        self.session.execute_graph("""
-            schema.clear();
-        """)
-
-    def reset_graph(self):
-        reset_graph(self.session, self.graph_name)
-
-    def wait_for_graph_inserted(self):
-        wait_for_graph_inserted(self.session, self.graph_name)
-
-    def _execute(self, query, graphson, params=None, execution_profile_options=None, **kwargs):
-        queries = query if isinstance(query, list) else [query]
-        ep = self.get_execution_profile(graphson)
-        if execution_profile_options:
-            ep = self.session.execution_profile_clone_update(ep, **execution_profile_options)
-
-        results = []
-        for query in queries:
-            log.debug(query)
-            rf = self.session.execute_graph_async(query, parameters=params, execution_profile=ep, **kwargs)
-            results.append(rf.result())
-            self.assertEqual(rf.message.custom_payload['graph-results'], graphson)
-
-        return results[0] if len(results) == 1 else results
-
-    def get_execution_profile(self, graphson, traversal=False):
-        ep = 'graphson1'
-        if graphson == GraphProtocol.GRAPHSON_2_0:
-            ep = 'graphson2'
-        elif graphson == GraphProtocol.GRAPHSON_3_0:
-            ep = 'graphson3'
-
-        return ep if traversal is False else 'traversal_' + ep
-
-    def resultset_to_list(self, rs):
-        results_list = []
-        for result in rs:
-            try:
-                results_list.append(result.value)
-            except:
-                results_list.append(result)
-
-        return results_list
-
-
-class GraphUnitTestCase(BasicKeyspaceUnitTestCase):
-
-    @property
-    def graph_name(self):
-        return self._testMethodName.lower()
-
-    def session_setup(self):
-        lbp = DSELoadBalancingPolicy(default_lbp_factory())
-
-        ep_graphson2 = GraphExecutionProfile(
-            request_timeout=60,
-            load_balancing_policy=lbp,
-            graph_options=GraphOptions(
-                graph_name=self.graph_name,
-                graph_protocol=GraphProtocol.GRAPHSON_2_0
-            ),
-            row_factory=graph_graphson2_row_factory)
-
-        ep_graphson3 = GraphExecutionProfile(
-            request_timeout=60,
-            load_balancing_policy=lbp,
-            graph_options=GraphOptions(
-                graph_name=self.graph_name,
-                graph_protocol=GraphProtocol.GRAPHSON_3_0
-            ),
-            row_factory=graph_graphson3_row_factory)
-
-        ep_graphson1 = GraphExecutionProfile(
-            request_timeout=60,
-            load_balancing_policy=lbp,
-            graph_options=GraphOptions(
-                graph_name=self.graph_name,
-                graph_language='gremlin-groovy'
-            )
-        )
-
-        ep_analytics = GraphAnalyticsExecutionProfile(
-            request_timeout=60,
-            load_balancing_policy=lbp,
-            graph_options=GraphOptions(
-                graph_source=b'a',
-                graph_language=b'gremlin-groovy',
-                graph_name=self.graph_name
-            )
-        )
-
-        self.cluster = TestCluster(execution_profiles={
-            EXEC_PROFILE_GRAPH_DEFAULT: ep_graphson1,
-            EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT: ep_analytics,
-            "graphson1": ep_graphson1,
-            "graphson2": ep_graphson2,
-            "graphson3": ep_graphson3
-        })
-
-        self.session = self.cluster.connect()
-        self.ks_name = self._testMethodName.lower()
-        self.cass_version, self.cql_version = get_server_versions()
-
-    def setUp(self):
-        """basic setup only"""
-        self.session_setup()
-
-    def setup_graph(self, schema):
-        """Config dependant setup"""
-        schema.drop_graph(self.session, self.graph_name)
-        schema.create_graph(self.session, self.graph_name)
-        schema.clear(self.session)
-        if schema is ClassicGraphSchema:
-            # enable dev and scan modes
-            self.session.execute_graph(MAKE_NON_STRICT)
-            self.session.execute_graph(ALLOW_SCANS)
-
-    def teardown_graph(self, schema):
-        schema.drop_graph(self.session, self.graph_name)
-
-    def tearDown(self):
-        self.cluster.shutdown()
-
-    def execute_graph_queries(self, queries, params=None, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT,
-                              verify_graphson=False, **kwargs):
-        results = []
-        for query in queries:
-            log.debug(query)
-            rf = self.session.execute_graph_async(query, parameters=params,
-                                                  execution_profile=execution_profile, **kwargs)
-            if verify_graphson:
-                self.assertEqual(rf.message.custom_payload['graph-results'], verify_graphson)
-            results.append(rf.result())
-
-        return results
-
-    def execute_graph(self, query, graphson, params=None, execution_profile_options=None, traversal=False, **kwargs):
-        queries = query if isinstance(query, list) else [query]
-        ep = self.get_execution_profile(graphson)
-        if traversal:
-            ep = 'traversal_' + ep
-        if execution_profile_options:
-            ep = self.session.execution_profile_clone_update(ep, **execution_profile_options)
-
-        results = self.execute_graph_queries(queries, params, ep, verify_graphson=graphson, **kwargs)
-
-        return results[0] if len(results) == 1 else results
-
-    def get_execution_profile(self, graphson, traversal=False):
-        ep = 'graphson1'
-        if graphson == GraphProtocol.GRAPHSON_2_0:
-            ep = 'graphson2'
-        elif graphson == GraphProtocol.GRAPHSON_3_0:
-            ep = 'graphson3'
-
-        return ep if traversal is False else 'traversal_' + ep
-
-    def resultset_to_list(self, rs):
-        results_list = []
-        for result in rs:
-            try:
-                results_list.append(result.value)
-            except:
-                results_list.append(result)
-
-        return results_list
-
-
-class BasicSharedGraphUnitTestCase(BasicKeyspaceUnitTestCase):
-    """
-    This is basic graph unit test case that provides various utility methods that can be leveraged for testcase setup and tear
-    down
-    """
-
-    @classmethod
-    def session_setup(cls):
-        cls.cluster = TestCluster()
-        cls.session = cls.cluster.connect()
-        cls.ks_name = cls.__name__.lower()
-        cls.cass_version, cls.cql_version = get_server_versions()
-        cls.graph_name = cls.__name__.lower()
-
-    @classmethod
-    def setUpClass(cls):
-        if DSE_VERSION:
-            cls.session_setup()
-            cls.reset_graph()
-            profiles = cls.cluster.profile_manager.profiles
-            profiles[EXEC_PROFILE_GRAPH_DEFAULT].request_timeout = 60
-            profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = cls.graph_name
-            profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].request_timeout = 60
-            profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT].graph_options.graph_name = cls.graph_name
-
-    @classmethod
-    def tearDownClass(cls):
-        if DSE_VERSION:
-            cls.cluster.shutdown()
-
-    @classmethod
-    def clear_schema(self):
-        self.session.execute_graph('schema.clear()')
-
-    @classmethod
-    def reset_graph(self):
-        reset_graph(self.session, self.graph_name)
-
-    def wait_for_graph_inserted(self):
-        wait_for_graph_inserted(self.session, self.graph_name)
-
-
-class GraphFixtures(object):
-
-    @staticmethod
-    def line(length, single_script=True):
-        raise NotImplementedError()
-
-    @staticmethod
-    def classic():
-        raise NotImplementedError()
-
-    @staticmethod
-    def multiple_fields():
-        raise NotImplementedError()
-
-    @staticmethod
-    def large():
-        raise NotImplementedError()
-
-
-class ClassicGraphFixtures(GraphFixtures):
-
-    @staticmethod
-    def datatypes():
-        data = {
-            "boolean1": ["Boolean()", True, None],
-            "boolean2": ["Boolean()", False, None],
-            "point1": ["Point()", Point(.5, .13), GraphSON1Deserializer.deserialize_point],
-            "point2": ["Point()", Point(-5, .0), GraphSON1Deserializer.deserialize_point],
-
-            "linestring1": ["Linestring()", LineString(((1.0, 2.0), (3.0, 4.0), (-89.0, 90.0))),
-                            GraphSON1Deserializer.deserialize_linestring],
-            "polygon1": ["Polygon()", Polygon([(10.0, 10.0), (80.0, 10.0), (80., 88.0), (10., 89.0), (10., 10.0)],
-                                              [[(20., 20.0), (20., 30.0), (30., 30.0), (30., 20.0), (20., 20.0)],
-                                               [(40., 20.0), (40., 30.0), (50., 30.0), (50., 20.0), (40., 20.0)]]),
-                         GraphSON1Deserializer.deserialize_polygon],
-            "int1": ["Int()", 2, GraphSON1Deserializer.deserialize_int],
-            "smallint1": ["Smallint()", 1, GraphSON1Deserializer.deserialize_smallint],
-            "bigint1": ["Bigint()", MAX_LONG, GraphSON1Deserializer.deserialize_bigint],
-            "bigint2": ["Bigint()", MIN_LONG, GraphSON1Deserializer.deserialize_bigint],
-            "bigint3": ["Bigint()", ZERO_LONG, GraphSON1Deserializer.deserialize_bigint],
-            "varint1": ["Varint()", 2147483647, GraphSON1Deserializer.deserialize_varint],
-            "int1": ["Int()", 100, GraphSON1Deserializer.deserialize_int],
-            "float1": ["Float()", 0.3415681, GraphSON1Deserializer.deserialize_float],
-            "double1": ["Double()", 0.34156811237335205, GraphSON1Deserializer.deserialize_double],
-            "uuid1": ["Uuid()", UUID('12345678123456781234567812345678'), GraphSON1Deserializer.deserialize_uuid],
-            "decimal1": ["Decimal()", Decimal(10), GraphSON1Deserializer.deserialize_decimal],
-            "blob1": ["Blob()", bytearray(b"Hello World"), GraphSON1Deserializer.deserialize_blob],
-
-            "timestamp1": ["Timestamp()", datetime.datetime.utcnow().replace(microsecond=0),
-                           GraphSON1Deserializer.deserialize_timestamp],
-            "timestamp2": ["Timestamp()", datetime.datetime.max.replace(microsecond=0),
-                           GraphSON1Deserializer.deserialize_timestamp],
-            # These are valid values but are pending for DSP-14093 to be fixed
-            #"timestamp3": ["Timestamp()", datetime.datetime(159, 1, 1, 23, 59, 59),
-            #               GraphSON1TypeDeserializer.deserialize_timestamp],
-            #"timestamp4": ["Timestamp()", datetime.datetime.min,
-            #               GraphSON1TypeDeserializer.deserialize_timestamp],
-            "inet1": ["Inet()", ipaddress.IPv4Address(u"127.0.0.1"), deserializer_plus_to_ipaddressv4],
-            "inet2": ["Inet()", ipaddress.IPv6Address(u"2001:db8:85a3:8d3:1319:8a2e:370:7348"),
-                      deserializer_plus_to_ipaddressv6],
-            "duration1": ["Duration()", datetime.timedelta(1, 16, 0),
-                          GraphSON1Deserializer.deserialize_duration],
-            "duration2": ["Duration()", datetime.timedelta(days=1, seconds=16, milliseconds=15),
-                          GraphSON1Deserializer.deserialize_duration],
-            "blob3": ["Blob()", bytes(b"Hello World Again"), GraphSON1Deserializer.deserialize_blob],
-            "blob4": ["Blob()", memoryview(b"And Again Hello World"), GraphSON1Deserializer.deserialize_blob]
-        }
-
-        if DSE_VERSION >= Version("5.1"):
-            data["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time]
-            data["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time]
-            data["time3"] = ["Time()", datetime.time(12, 6), GraphSON1Deserializer.deserialize_time]
-            data["time4"] = ["Time()", datetime.time.min, GraphSON1Deserializer.deserialize_time]
-            data["time5"] = ["Time()", datetime.time.max, GraphSON1Deserializer.deserialize_time]
-            data["blob5"] = ["Blob()", bytearray(b"AKDLIElksadlaswqA" * 10000), GraphSON1Deserializer.deserialize_blob]
-            data["datetime1"] = ["Date()", datetime.date.today(), GraphSON1Deserializer.deserialize_date]
-            data["datetime2"] = ["Date()", datetime.date(159, 1, 3), GraphSON1Deserializer.deserialize_date]
-            data["datetime3"] = ["Date()", datetime.date.min, GraphSON1Deserializer.deserialize_date]
-            data["datetime4"] = ["Date()", datetime.date.max, GraphSON1Deserializer.deserialize_date]
-            data["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time]
-            data["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time]
-            data["time3"] = ["Time()", datetime.time(12, 6), GraphSON1Deserializer.deserialize_time]
-            data["time4"] = ["Time()", datetime.time.min, GraphSON1Deserializer.deserialize_time]
-            data["time5"] = ["Time()", datetime.time.max, GraphSON1Deserializer.deserialize_time]
-
-        return data
-
-    @staticmethod
-    def line(length, single_script=False):
-        queries = [ALLOW_SCANS + ';',
-                   """schema.propertyKey('index').Int().ifNotExists().create();
-                      schema.propertyKey('distance').Int().ifNotExists().create();
-                      schema.vertexLabel('lp').properties('index').ifNotExists().create();
-                      schema.edgeLabel('goesTo').properties('distance').connection('lp', 'lp').ifNotExists().create();"""]
-
-        vertex_script = ["Vertex vertex0 = graph.addVertex(label, 'lp', 'index', 0);"]
-        for index in range(1, length):
-            if not single_script and len(vertex_script) > 25:
-                queries.append("\n".join(vertex_script))
-                vertex_script = [
-                    "Vertex vertex{pindex} = g.V().hasLabel('lp').has('index', {pindex}).next()".format(
-                        pindex=index-1)]
-
-            vertex_script.append('''
-                Vertex vertex{vindex} = graph.addVertex(label, 'lp', 'index', {vindex});
-                vertex{pindex}.addEdge('goesTo', vertex{vindex}, 'distance', 5); '''.format(
-                vindex=index, pindex=index - 1))
-
-        queries.append("\n".join(vertex_script))
-        return queries
-
-    @staticmethod
-    def classic():
-        queries = [ALLOW_SCANS,
-                   '''schema.propertyKey('name').Text().ifNotExists().create();
-                      schema.propertyKey('age').Int().ifNotExists().create();
-                      schema.propertyKey('lang').Text().ifNotExists().create();
-                      schema.propertyKey('weight').Float().ifNotExists().create();
-                      schema.vertexLabel('person').properties('name', 'age').ifNotExists().create();
-                      schema.vertexLabel('software').properties('name', 'lang').ifNotExists().create();
-                      schema.edgeLabel('created').properties('weight').connection('person', 'software').ifNotExists().create();
-                      schema.edgeLabel('created').connection('software', 'software').add();
-                      schema.edgeLabel('knows').properties('weight').connection('person', 'person').ifNotExists().create();''',
-
-                   '''Vertex marko = graph.addVertex(label, 'person', 'name', 'marko', 'age', 29);
-                      Vertex vadas = graph.addVertex(label, 'person', 'name', 'vadas', 'age', 27);
-                      Vertex lop = graph.addVertex(label, 'software', 'name', 'lop', 'lang', 'java');
-                      Vertex josh = graph.addVertex(label, 'person', 'name', 'josh', 'age', 32);
-                      Vertex ripple = graph.addVertex(label, 'software', 'name', 'ripple', 'lang', 'java');
-                      Vertex peter = graph.addVertex(label, 'person', 'name', 'peter', 'age', 35);
-                      Vertex carl = graph.addVertex(label, 'person', 'name', 'carl', 'age', 35);
-                      marko.addEdge('knows', vadas, 'weight', 0.5f);
-                      marko.addEdge('knows', josh, 'weight', 1.0f);
-                      marko.addEdge('created', lop, 'weight', 0.4f);
-                      josh.addEdge('created', ripple, 'weight', 1.0f);
-                      josh.addEdge('created', lop, 'weight', 0.4f);
-                      peter.addEdge('created', lop, 'weight', 0.2f);''']
-
-        return "\n".join(queries)
-
-    @staticmethod
-    def multiple_fields():
-        query_params = {}
-        queries= [ALLOW_SCANS,
-                  '''schema.propertyKey('shortvalue').Smallint().ifNotExists().create();
-                     schema.vertexLabel('shortvertex').properties('shortvalue').ifNotExists().create();
-                     short s1 = 5000; graph.addVertex(label, "shortvertex", "shortvalue", s1);
-                     schema.propertyKey('intvalue').Int().ifNotExists().create();
-                     schema.vertexLabel('intvertex').properties('intvalue').ifNotExists().create();
-                     int i1 = 1000000000; graph.addVertex(label, "intvertex", "intvalue", i1);
-                     schema.propertyKey('intvalue2').Int().ifNotExists().create();
-                     schema.vertexLabel('intvertex2').properties('intvalue2').ifNotExists().create();
-                     Integer i2 = 100000000; graph.addVertex(label, "intvertex2", "intvalue2", i2);
-                     schema.propertyKey('longvalue').Bigint().ifNotExists().create();
-                     schema.vertexLabel('longvertex').properties('longvalue').ifNotExists().create();
-                     long l1 = 9223372036854775807; graph.addVertex(label, "longvertex", "longvalue", l1);
-                     schema.propertyKey('longvalue2').Bigint().ifNotExists().create();
-                     schema.vertexLabel('longvertex2').properties('longvalue2').ifNotExists().create();
-                     Long l2 = 100000000000000000L; graph.addVertex(label, "longvertex2", "longvalue2", l2);
-                     schema.propertyKey('floatvalue').Float().ifNotExists().create();
-                     schema.vertexLabel('floatvertex').properties('floatvalue').ifNotExists().create();
-                     float f1 = 3.5f; graph.addVertex(label, "floatvertex", "floatvalue", f1);
-                     schema.propertyKey('doublevalue').Double().ifNotExists().create();
-                     schema.vertexLabel('doublevertex').properties('doublevalue').ifNotExists().create();
-                     double d1 = 3.5e40; graph.addVertex(label, "doublevertex", "doublevalue", d1);
-                     schema.propertyKey('doublevalue2').Double().ifNotExists().create();
-                     schema.vertexLabel('doublevertex2').properties('doublevalue2').ifNotExists().create();
-                     Double d2 = 3.5e40d; graph.addVertex(label, "doublevertex2", "doublevalue2", d2);''']
-
-        if DSE_VERSION >= Version('5.1'):
-            queries.append('''schema.propertyKey('datevalue1').Date().ifNotExists().create();
-                              schema.vertexLabel('datevertex1').properties('datevalue1').ifNotExists().create();
-                              schema.propertyKey('negdatevalue2').Date().ifNotExists().create();
-                              schema.vertexLabel('negdatevertex2').properties('negdatevalue2').ifNotExists().create();''')
-
-            for i in range(1, 4):
-                queries.append('''schema.propertyKey('timevalue{0}').Time().ifNotExists().create();
-                                  schema.vertexLabel('timevertex{0}').properties('timevalue{0}').ifNotExists().create();'''.format(
-                    i))
-
-            queries.append('graph.addVertex(label, "datevertex1", "datevalue1", date1);')
-            query_params['date1'] = '1999-07-29'
-
-            queries.append('graph.addVertex(label, "negdatevertex2", "negdatevalue2", date2);')
-            query_params['date2'] = '-1999-07-28'
-
-            queries.append('graph.addVertex(label, "timevertex1", "timevalue1", time1);')
-            query_params['time1'] = '14:02'
-            queries.append('graph.addVertex(label, "timevertex2", "timevalue2", time2);')
-            query_params['time2'] = '14:02:20'
-            queries.append('graph.addVertex(label, "timevertex3", "timevalue3", time3);')
-            query_params['time3'] = '14:02:20.222'
-
-        return queries, query_params
-
-    @staticmethod
-    def large():
-        query_parts = ['''
-            int size = 2000;
-            List ids = new ArrayList();
-            schema.propertyKey('ts').Int().single().ifNotExists().create();
-            schema.propertyKey('sin').Int().single().ifNotExists().create();
-            schema.propertyKey('cos').Int().single().ifNotExists().create();
-            schema.propertyKey('ii').Int().single().ifNotExists().create();
-            schema.vertexLabel('lcg').properties('ts', 'sin', 'cos', 'ii').ifNotExists().create();
-            schema.edgeLabel('linked').connection('lcg', 'lcg').ifNotExists().create();
-            Vertex v = graph.addVertex(label, 'lcg');
-            v.property("ts", 100001);
-            v.property("sin", 0);
-            v.property("cos", 1);
-            v.property("ii", 0);
-            ids.add(v.id());
-            Random rand = new Random();
-            for (int ii = 1; ii < size; ii++) {
-                v = graph.addVertex(label, 'lcg');
-                v.property("ii", ii);
-                v.property("ts", 100001 + ii);
-                v.property("sin", Math.sin(ii/5.0));
-                v.property("cos", Math.cos(ii/5.0));
-                Vertex u = g.V(ids.get(rand.nextInt(ids.size()))).next();
-                v.addEdge("linked", u);
-                ids.add(v.id());
-            }
-            g.V().count();''']
-
-        return "\n".join(query_parts)
-
-    @staticmethod
-    def address_book():
-        p1 = "Point()"
-        p2 = "Point()"
-        if DSE_VERSION >= Version('5.1'):
-            p1 = "Point().withBounds(-100, -100, 100, 100)"
-            p2 = "Point().withGeoBounds()"
-
-        queries = [
-            ALLOW_SCANS,
-            "schema.propertyKey('name').Text().ifNotExists().create()",
-            "schema.propertyKey('pointPropWithBoundsWithSearchIndex').{}.ifNotExists().create()".format(p1),
-            "schema.propertyKey('pointPropWithBounds').{}.ifNotExists().create()".format(p1),
-            "schema.propertyKey('pointPropWithGeoBoundsWithSearchIndex').{}.ifNotExists().create()".format(p2),
-            "schema.propertyKey('pointPropWithGeoBounds').{}.ifNotExists().create()".format(p2),
-            "schema.propertyKey('city').Text().ifNotExists().create()",
-            "schema.propertyKey('state').Text().ifNotExists().create()",
-            "schema.propertyKey('description').Text().ifNotExists().create()",
-            "schema.vertexLabel('person').properties('name', 'city', 'state', 'description', 'pointPropWithBoundsWithSearchIndex', 'pointPropWithBounds', 'pointPropWithGeoBoundsWithSearchIndex', 'pointPropWithGeoBounds').ifNotExists().create()",
-            "schema.vertexLabel('person').index('searchPointWithBounds').secondary().by('pointPropWithBounds').ifNotExists().add()",
-            "schema.vertexLabel('person').index('searchPointWithGeoBounds').secondary().by('pointPropWithGeoBounds').ifNotExists().add()",
-
-            "g.addV('person').property('name', 'Paul Thomas Joe').property('city', 'Rochester').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithBounds', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBounds', Geo.point(-92.46295, 44.0234)).property('description', 'Lives by the hospital').next()",
-            "g.addV('person').property('name', 'George Bill Steve').property('city', 'Minneapolis').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithBounds', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBounds', Geo.point(-93.266667, 44.093333)).property('description', 'A cold dude').next()",
-            "g.addV('person').property('name', 'James Paul Smith').property('city', 'Chicago').property('state', 'IL').property('pointPropWithBoundsWithSearchIndex', Geo.point(-87.684722, 41.836944)).property('description', 'Likes to hang out').next()",
-            "g.addV('person').property('name', 'Jill Alice').property('city', 'Atlanta').property('state', 'GA').property('pointPropWithBoundsWithSearchIndex', Geo.point(-84.39, 33.755)).property('description', 'Enjoys a nice cold coca cola').next()"
-        ]
-
-        if not Version('5.0') <= DSE_VERSION < Version('5.1'):
-            queries.append("schema.vertexLabel('person').index('search').search().by('pointPropWithBoundsWithSearchIndex').withError(0.00001, 0.0).by('pointPropWithGeoBoundsWithSearchIndex').withError(0.00001, 0.0).ifNotExists().add()")
-
-        return "\n".join(queries)
-
-
-class CoreGraphFixtures(GraphFixtures):
-
-    @staticmethod
-    def datatypes():
-        data = ClassicGraphFixtures.datatypes()
-        del data['duration1']
-        del data['duration2']
-
-        # Core Graphs only types
-        data["map1"] = ["mapOf(Text, Text)", {'test': 'test'}, None]
-        data["map2"] = ["mapOf(Text, Point)", {'test': Point(.5, .13)}, None]
-        data["map3"] = ["frozen(mapOf(Int, Varchar))", {42: 'test'}, None]
-
-        data["list1"] = ["listOf(Text)", ['test', 'hello', 'world'], None]
-        data["list2"] = ["listOf(Int)", [42, 632, 32], None]
-        data["list3"] = ["listOf(Point)", [Point(.5, .13), Point(42.5, .13)], None]
-        data["list4"] = ["frozen(listOf(Int))", [42, 55, 33], None]
-
-        data["set1"] = ["setOf(Text)", {'test', 'hello', 'world'}, None]
-        data["set2"] = ["setOf(Int)", {42, 632, 32}, None]
-        data["set3"] = ["setOf(Point)", {Point(.5, .13), Point(42.5, .13)}, None]
-        data["set4"] = ["frozen(setOf(Int))", {42, 55, 33}, None]
-
-        data["tuple1"] = ["tupleOf(Int, Text)", (42, "world"), None]
-        data["tuple2"] = ["tupleOf(Int, tupleOf(Text, tupleOf(Text, Point)))", (42, ("world", ('this', Point(.5, .13)))), None]
-        data["tuple3"] = ["tupleOf(Int, tupleOf(Text, frozen(mapOf(Text, Text))))", (42, ("world", {'test': 'test'})), None]
-        data["tuple4"] = ["tupleOf(Int, tupleOf(Text, frozen(listOf(Int))))", (42, ("world", [65, 89])), None]
-        data["tuple5"] = ["tupleOf(Int, tupleOf(Text, frozen(setOf(Int))))", (42, ("world", {65, 55})), None]
-        data["tuple6"] = ["tupleOf(Int, tupleOf(Text, tupleOf(Text, LineString)))",
-                          (42, ("world", ('this', LineString(((1.0, 2.0), (3.0, 4.0), (-89.0, 90.0)))))), None]
-
-        data["tuple7"] = ["tupleOf(Int, tupleOf(Text, tupleOf(Text, Polygon)))",
-                          (42, ("world", ('this', Polygon([(10.0, 10.0), (80.0, 10.0), (80., 88.0), (10., 89.0), (10., 10.0)],
-                                                          [[(20., 20.0), (20., 30.0), (30., 30.0), (30., 20.0), (20., 20.0)],
-                                                           [(40., 20.0), (40., 30.0), (50., 30.0), (50., 20.0), (40., 20.0)]])))), None]
-        data["dse_duration1"] = ["Duration()", Duration(42, 12, 10303312), None]
-        data["dse_duration2"] = ["Duration()", Duration(50, 32, 11), None]
-
-        return data
-
-    @staticmethod
-    def line(length, single_script=False):
-        queries = ["""
-            schema.vertexLabel('lp').ifNotExists().partitionBy('index', Int).create();
-            schema.edgeLabel('goesTo').ifNotExists().from('lp').to('lp').property('distance', Int).create();
-        """]
-
-        vertex_script = ["g.addV('lp').property('index', 0).next();"]
-        for index in range(1, length):
-            if not single_script and len(vertex_script) > 25:
-                queries.append("\n".join(vertex_script))
-                vertex_script = []
-
-            vertex_script.append('''
-                g.addV('lp').property('index', {index}).next();
-                g.V().hasLabel('lp').has('index', {pindex}).as('pp').V().hasLabel('lp').has('index', {index}).as('p').
-                addE('goesTo').from('pp').to('p').property('distance', 5).next();
-            '''.format(
-                index=index, pindex=index - 1))
-
-        queries.append("\n".join(vertex_script))
-        return queries
-
-    @staticmethod
-    def classic():
-        queries = [
-            '''
-            schema.vertexLabel('person').ifNotExists().partitionBy('name', Text).property('age', Int).create();
-            schema.vertexLabel('software')ifNotExists().partitionBy('name', Text).property('lang', Text).create();
-            schema.edgeLabel('created').ifNotExists().from('person').to('software').property('weight', Double).create();
-            schema.edgeLabel('knows').ifNotExists().from('person').to('person').property('weight', Double).create();
-            ''',
-
-            '''
-            Vertex marko = g.addV('person').property('name', 'marko').property('age', 29).next();
-            Vertex vadas = g.addV('person').property('name', 'vadas').property('age', 27).next();
-            Vertex lop = g.addV('software').property('name', 'lop').property('lang', 'java').next();
-            Vertex josh = g.addV('person').property('name', 'josh').property('age', 32).next();
-            Vertex peter = g.addV('person').property('name', 'peter').property('age', 35).next();
-            Vertex carl = g.addV('person').property('name', 'carl').property('age', 35).next();
-            Vertex ripple = g.addV('software').property('name', 'ripple').property('lang', 'java').next();
-
-            // TODO, switch to VertexReference and use v.id()
-            g.V().hasLabel('person').has('name', 'vadas').as('v').V().hasLabel('person').has('name', 'marko').as('m').addE('knows').from('m').to('v').property('weight', 0.5d).next();
-            g.V().hasLabel('person').has('name', 'josh').as('j').V().hasLabel('person').has('name', 'marko').as('m').addE('knows').from('m').to('j').property('weight', 1.0d).next();
-            g.V().hasLabel('software').has('name', 'lop').as('l').V().hasLabel('person').has('name', 'marko').as('m').addE('created').from('m').to('l').property('weight', 0.4d).next();
-            g.V().hasLabel('software').has('name', 'ripple').as('r').V().hasLabel('person').has('name', 'josh').as('j').addE('created').from('j').to('r').property('weight', 1.0d).next();
-            g.V().hasLabel('software').has('name', 'lop').as('l').V().hasLabel('person').has('name', 'josh').as('j').addE('created').from('j').to('l').property('weight', 0.4d).next();
-            g.V().hasLabel('software').has('name', 'lop').as('l').V().hasLabel('person').has('name', 'peter').as('p').addE('created').from('p').to('l').property('weight', 0.2d).next();
-
-            ''']
-
-        return queries
-
-    @staticmethod
-    def multiple_fields():
-        ## no generic test currently needs this
-        raise NotImplementedError()
-
-    @staticmethod
-    def large():
-        query_parts = [
-            '''
-            schema.vertexLabel('lcg').ifNotExists().partitionBy('ts', Int).property('sin', Double).
-                property('cos', Double).property('ii', Int).create();
-            schema.edgeLabel('linked').ifNotExists().from('lcg').to('lcg').create();
-            ''',
-
-            '''
-            int size = 2000;
-            List ids = new ArrayList();
-            v = g.addV('lcg').property('ts', 100001).property('sin', 0d).property('cos', 1d).property('ii', 0).next();
-            ids.add(v.id());
-            Random rand = new Random();
-            for (int ii = 1; ii < size; ii++) {
-                v = g.addV('lcg').property('ts', 100001 + ii).property('sin', Math.sin(ii/5.0)).property('cos', Math.cos(ii/5.0)).property('ii', ii).next();
-
-                uid = ids.get(rand.nextInt(ids.size()))
-                g.V(v.id()).as('v').V(uid).as('u').addE('linked').from('v').to('u').next();
-                ids.add(v.id());
-            }
-            g.V().count();'''
-        ]
-
-        return query_parts
-
-    @staticmethod
-    def address_book():
-        queries = [
-            "schema.vertexLabel('person').ifNotExists().partitionBy('name', Text)."
-            "property('pointPropWithBoundsWithSearchIndex', Point)."
-            "property('pointPropWithBounds', Point)."
-            "property('pointPropWithGeoBoundsWithSearchIndex', Point)."
-            "property('pointPropWithGeoBounds', Point)."
-            "property('city', Text)."
-            "property('state', Text)."
-            "property('description', Text).create()",
-            "schema.vertexLabel('person').searchIndex().by('name').by('pointPropWithBounds').by('pointPropWithGeoBounds').by('description').asText().create()",
-            "g.addV('person').property('name', 'Paul Thomas Joe').property('city', 'Rochester').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithBounds', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-92.46295, 44.0234)).property('pointPropWithGeoBounds', Geo.point(-92.46295, 44.0234)).property('description', 'Lives by the hospital').next()",
-            "g.addV('person').property('name', 'George Bill Steve').property('city', 'Minneapolis').property('state', 'MN').property('pointPropWithBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithBounds', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBoundsWithSearchIndex', Geo.point(-93.266667, 44.093333)).property('pointPropWithGeoBounds', Geo.point(-93.266667, 44.093333)).property('description', 'A cold dude').next()",
-            "g.addV('person').property('name', 'James Paul Smith').property('city', 'Chicago').property('state', 'IL').property('pointPropWithBoundsWithSearchIndex', Geo.point(-87.684722, 41.836944)).property('description', 'Likes to hang out').next()",
-            "g.addV('person').property('name', 'Jill Alice').property('city', 'Atlanta').property('state', 'GA').property('pointPropWithBoundsWithSearchIndex', Geo.point(-84.39, 33.755)).property('description', 'Enjoys a nice cold coca cola').next()"
-        ]
-
-        if not Version('5.0') <= DSE_VERSION < Version('5.1'):
-            queries.append("schema.vertexLabel('person').searchIndex().by('pointPropWithBoundsWithSearchIndex').by('pointPropWithGeoBounds')"
-                           ".by('pointPropWithGeoBoundsWithSearchIndex').create()")
-
-        return queries
-
-
-def validate_classic_vertex(test, vertex):
-    vertex_props = vertex.properties.keys()
-    test.assertEqual(len(vertex_props), 2)
-    test.assertIn('name', vertex_props)
-    test.assertTrue('lang' in vertex_props or 'age' in vertex_props)
-
-
-def validate_classic_vertex_return_type(test, vertex):
-    validate_generic_vertex_result_type(vertex)
-    vertex_props = vertex.properties
-    test.assertIn('name', vertex_props)
-    test.assertTrue('lang' in vertex_props or 'age' in vertex_props)
-
-
-def validate_generic_vertex_result_type(test, vertex):
test.assertIsInstance(vertex, Vertex) - for attr in ('id', 'type', 'label', 'properties'): - test.assertIsNotNone(getattr(vertex, attr)) - - -def validate_classic_edge_properties(test, edge_properties): - test.assertEqual(len(edge_properties.keys()), 1) - test.assertIn('weight', edge_properties) - test.assertIsInstance(edge_properties, dict) - - -def validate_classic_edge(test, edge): - validate_generic_edge_result_type(test, edge) - validate_classic_edge_properties(test, edge.properties) - - -def validate_line_edge(test, edge): - validate_generic_edge_result_type(test, edge) - edge_props = edge.properties - test.assertEqual(len(edge_props.keys()), 1) - test.assertIn('distance', edge_props) - - -def validate_generic_edge_result_type(test, edge): - test.assertIsInstance(edge, Edge) - for attr in ('properties', 'outV', 'outVLabel', 'inV', 'inVLabel', 'label', 'type', 'id'): - test.assertIsNotNone(getattr(edge, attr)) - - -def validate_path_result_type(test, path): - test.assertIsInstance(path, Path) - test.assertIsNotNone(path.labels) - for obj in path.objects: - if isinstance(obj, Edge): - validate_classic_edge(test, obj) - elif isinstance(obj, Vertex): - validate_classic_vertex(test, obj) - else: - test.fail("Invalid object found in path " + str(object.type)) - - -class GraphTestConfiguration(object): - """Possible Configurations: - ClassicGraphSchema: - graphson1 - graphson2 - graphson3 - - CoreGraphSchema - graphson3 - """ - - @classmethod - def schemas(cls): - schemas = [ClassicGraphSchema] - if DSE_VERSION >= Version("6.8"): - schemas.append(CoreGraphSchema) - return schemas - - @classmethod - def graphson_versions(cls): - graphson_versions = [GraphProtocol.GRAPHSON_1_0] - if DSE_VERSION >= Version("6.0"): - graphson_versions.append(GraphProtocol.GRAPHSON_2_0) - if DSE_VERSION >= Version("6.8"): - graphson_versions.append(GraphProtocol.GRAPHSON_3_0) - return graphson_versions - - @classmethod - def schema_configurations(cls, schema=None): - schemas = cls.schemas() if schema is None else [schema] - configurations = [] - for s in schemas: - configurations.append(s) - - return configurations - - @classmethod - def configurations(cls, schema=None, graphson=None): - schemas = cls.schemas() if schema is None else [schema] - graphson_versions = cls.graphson_versions() if graphson is None else [graphson] - - configurations = [] - for s in schemas: - for g in graphson_versions: - if s is CoreGraphSchema and g != GraphProtocol.GRAPHSON_3_0: - continue - configurations.append((s, g)) - - return configurations - - @staticmethod - def _make_graph_schema_test_method(func, schema): - def test_input(self): - self.setup_graph(schema) - try: - func(self, schema) - except: - raise - finally: - self.teardown_graph(schema) - - schema_name = 'classic' if schema is ClassicGraphSchema else 'core' - test_input.__name__ = '{func}_{schema}'.format( - func=func.__name__.lstrip('_'), schema=schema_name) - return test_input - - @staticmethod - def _make_graph_test_method(func, schema, graphson): - def test_input(self): - self.setup_graph(schema) - try: - func(self, schema, graphson) - except: - raise - finally: - self.teardown_graph(schema) - - graphson_name = 'graphson1' - if graphson == GraphProtocol.GRAPHSON_2_0: - graphson_name = 'graphson2' - elif graphson == GraphProtocol.GRAPHSON_3_0: - graphson_name = 'graphson3' - - schema_name = 'classic' if schema is ClassicGraphSchema else 'core' - - # avoid keyspace name too long issue - if DSE_VERSION < Version('6.7'): - schema_name = schema_name[0] - graphson_name = 
-    @staticmethod
-    def _make_graph_schema_test_method(func, schema):
-        def test_input(self):
-            self.setup_graph(schema)
-            try:
-                func(self, schema)
-            except:
-                raise
-            finally:
-                self.teardown_graph(schema)
-
-        schema_name = 'classic' if schema is ClassicGraphSchema else 'core'
-        test_input.__name__ = '{func}_{schema}'.format(
-            func=func.__name__.lstrip('_'), schema=schema_name)
-        return test_input
-
-    @staticmethod
-    def _make_graph_test_method(func, schema, graphson):
-        def test_input(self):
-            self.setup_graph(schema)
-            try:
-                func(self, schema, graphson)
-            except:
-                raise
-            finally:
-                self.teardown_graph(schema)
-
-        graphson_name = 'graphson1'
-        if graphson == GraphProtocol.GRAPHSON_2_0:
-            graphson_name = 'graphson2'
-        elif graphson == GraphProtocol.GRAPHSON_3_0:
-            graphson_name = 'graphson3'
-
-        schema_name = 'classic' if schema is ClassicGraphSchema else 'core'
-
-        # shorten the names to avoid the keyspace-name-too-long issue
-        if DSE_VERSION < Version('6.7'):
-            schema_name = schema_name[0]
-            graphson_name = 'g' + graphson_name[-1]
-
-        test_input.__name__ = '{func}_{schema}_{graphson}'.format(
-            func=func.__name__.lstrip('_'), schema=schema_name, graphson=graphson_name)
-        return test_input
-
-    @classmethod
-    def generate_tests(cls, schema=None, graphson=None, traversal=False):
-        """Generate tests for a graph configuration"""
-        def decorator(klass):
-            if DSE_VERSION:
-                predicate = inspect.isfunction
-                for name, func in inspect.getmembers(klass, predicate=predicate):
-                    if not name.startswith('_test'):
-                        continue
-                    for _schema, _graphson in cls.configurations(schema, graphson):
-                        if traversal and _graphson == GraphProtocol.GRAPHSON_1_0:
-                            continue
-                        test_input = cls._make_graph_test_method(func, _schema, _graphson)
-                        log.debug("Generated test '{}.{}'".format(klass.__name__, test_input.__name__))
-                        setattr(klass, test_input.__name__, test_input)
-            return klass
-
-        return decorator
-
-    @classmethod
-    def generate_schema_tests(cls, schema=None):
-        """Generate schema tests for a graph configuration"""
-        def decorator(klass):
-            if DSE_VERSION:
-                predicate = inspect.isfunction
-                for name, func in inspect.getmembers(klass, predicate=predicate):
-                    if not name.startswith('_test'):
-                        continue
-                    for _schema in cls.schema_configurations(schema):
-                        test_input = cls._make_graph_schema_test_method(func, _schema)
-                        log.debug("Generated test '{}.{}'".format(klass.__name__, test_input.__name__))
-                        setattr(klass, test_input.__name__, test_input)
-            return klass
-
-        return decorator
-
-
-class VertexLabel(object):
-    """
-    Helper that represents a new VertexLabel:
-
-    VertexLabel(['Int()', 'Float()'])  # a label with two auto-named properties (plus pkid)
-    VertexLabel([('int1', 'Int()'), 'Float()'])  # a label with a property named int1 and one auto-named property
-    """
-
-    id = 0
-    label = None
-    properties = None
-
-    def __init__(self, properties):
-        VertexLabel.id += 1
-        self.id = VertexLabel.id
-        self.label = "vertex{}".format(self.id)
-        self.properties = {'pkid': self.id}
-        property_count = 0
-        for p in properties:
-            if isinstance(p, tuple):
-                name, typ = p
-            else:
-                property_count += 1
-                name = "property-v{}-{}".format(self.id, property_count)
-                typ = p
-            self.properties[name] = typ
-
-    @property
-    def non_pk_properties(self):
-        return {p: v for p, v in self.properties.items() if p != 'pkid'}
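A short usage sketch of VertexLabel; the ids shown assume this is the first label created in the process, since the counter is class-level:

    vl = VertexLabel(['Int()', ('height', 'Float()')])
    # vl.label == 'vertex1'
    # vl.properties == {'pkid': 1, 'property-v1-1': 'Int()', 'height': 'Float()'}
    # vl.non_pk_properties == {'property-v1-1': 'Int()', 'height': 'Float()'}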
-
-
-class GraphSchema(object):
-
-    has_geo_bounds = DSE_VERSION and DSE_VERSION >= Version('5.1')
-    fixtures = GraphFixtures
-
-    @classmethod
-    def sanitize_type(cls, typ):
-        if typ.lower().startswith("point"):
-            return cls.sanitize_point_type()
-        elif typ.lower().startswith("line"):
-            return cls.sanitize_line_type()
-        elif typ.lower().startswith("poly"):
-            return cls.sanitize_polygon_type()
-        else:
-            return typ
-
-    @classmethod
-    def sanitize_point_type(cls):
-        return "Point().withGeoBounds()" if cls.has_geo_bounds else "Point()"
-
-    @classmethod
-    def sanitize_line_type(cls):
-        return "Linestring().withGeoBounds()" if cls.has_geo_bounds else "Linestring()"
-
-    @classmethod
-    def sanitize_polygon_type(cls):
-        return "Polygon().withGeoBounds()" if cls.has_geo_bounds else "Polygon()"
-
-    @staticmethod
-    def drop_graph(session, graph_name):
-        ks = list(session.execute(
-            "SELECT * FROM system_schema.keyspaces WHERE keyspace_name = '{}';".format(graph_name)))
-        if not ks:
-            return
-
-        try:
-            session.execute_graph('system.graph(name).drop()', {'name': graph_name},
-                                  execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)
-        except:
-            pass
-
-    @staticmethod
-    def create_graph(session, graph_name):
-        raise NotImplementedError()
-
-    @staticmethod
-    def clear(session):
-        pass
-
-    @staticmethod
-    def create_vertex_label(session, vertex_label, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
-        raise NotImplementedError()
-
-    @staticmethod
-    def add_vertex(session, vertex_label, name, value, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
-        raise NotImplementedError()
-
-    @classmethod
-    def ensure_properties(cls, session, obj, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
-        if not isinstance(obj, (Vertex, Edge)):
-            return
-
-        # This pre-processing is needed because of a change in TinkerPop:
-        # properties are no longer returned automatically with some queries.
-        if not obj.properties:
-            if isinstance(obj, Edge):
-                obj.properties = {}
-                for p in cls.get_edge_properties(session, obj, execution_profile=execution_profile):
-                    obj.properties.update(p)
-            elif isinstance(obj, Vertex):
-                obj.properties = {
-                    p.label: p
-                    for p in cls.get_vertex_properties(session, obj, execution_profile=execution_profile)
-                }
-
-    @staticmethod
-    def get_vertex_properties(session, vertex, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
-        return session.execute_graph("g.V(vertex_id).properties().toList()", {'vertex_id': vertex.id},
-                                     execution_profile=execution_profile)
-
-    @staticmethod
-    def get_edge_properties(session, edge, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
-        return session.execute_graph("g.E(edge_id).properties().toList()", {'edge_id': edge.id},
-                                     execution_profile=execution_profile)
-
-
-class ClassicGraphSchema(GraphSchema):
-
-    fixtures = ClassicGraphFixtures
-
-    @staticmethod
-    def create_graph(session, graph_name):
-        session.execute_graph(CREATE_CLASSIC_GRAPH, {'name': graph_name},
-                              execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)
-        wait_for_graph_inserted(session, graph_name)
-
-    @staticmethod
-    def clear(session):
-        session.execute_graph('schema.clear()')
-
-    @classmethod
-    def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
-        statements = ["schema.propertyKey('pkid').Int().ifNotExists().create();"]
-        for k, v in vertex_label.non_pk_properties.items():
-            typ = cls.sanitize_type(v)
-            statements.append("schema.propertyKey('{name}').{type}.create();".format(
-                name=k, type=typ
-            ))
-
-        statements.append("schema.vertexLabel('{label}').partitionKey('pkid').properties(".format(
-            label=vertex_label.label))
-        property_names = [name for name in vertex_label.non_pk_properties.keys()]
-        statements.append(", ".join(["'{}'".format(p) for p in property_names]))
-        statements.append(").create();")
-
-        to_run = "\n".join(statements)
-        session.execute_graph(to_run, execution_profile=execution_profile)
-
-    @staticmethod
-    def add_vertex(session, vertex_label, name, value, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
-        statement = "g.addV('{label}').property('pkid', {pkid}).property('{property_name}', val);".format(
-            pkid=vertex_label.id, label=vertex_label.label, property_name=name)
-        parameters = {'val': value}
-        return session.execute_graph(statement, parameters, execution_profile=execution_profile)
-
-
-class CoreGraphSchema(GraphSchema):
-
-    fixtures = CoreGraphFixtures
-
-    @classmethod
-    def sanitize_type(cls, typ):
-        typ = super(CoreGraphSchema, cls).sanitize_type(typ)
-        return typ.replace('()', '')
-
-    @classmethod
-    def sanitize_point_type(cls):
-        return "Point"
-
-    @classmethod
-    def sanitize_line_type(cls):
-        return "LineString"
-
-    @classmethod
-    def sanitize_polygon_type(cls):
-        return "Polygon"
-
-    @staticmethod
-    def create_graph(session, graph_name):
-        session.execute_graph('system.graph(name).create()', {'name': graph_name},
-                              execution_profile=EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)
-        wait_for_graph_inserted(session, graph_name)
-
-    @classmethod
-    def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
-        statements = ["schema.vertexLabel('{label}').partitionBy('pkid', Int)".format(
-            label=vertex_label.label)]
-
-        for name, typ in vertex_label.non_pk_properties.items():
-            typ = cls.sanitize_type(typ)
-            statements.append(".property('{name}', {type})".format(name=name, type=typ))
-        statements.append(".create();")
-
-        to_run = "\n".join(statements)
-        session.execute_graph(to_run, execution_profile=execution_profile)
-
-    @staticmethod
-    def add_vertex(session, vertex_label, name, value, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT):
-        statement = "g.addV('{label}').property('pkid', {pkid}).property('{property_name}', val);".format(
-            pkid=vertex_label.id, label=vertex_label.label, property_name=name)
-        parameters = {'val': value}
-        return session.execute_graph(statement, parameters, execution_profile=execution_profile)
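The two schema classes emit different DDL dialects for the same VertexLabel. A sketch of the statements create_vertex_label builds for a label with a single Int property (label and property names follow the auto-naming shown earlier; the real statements are newline-joined):

    # ClassicGraphSchema:
    #   schema.propertyKey('pkid').Int().ifNotExists().create();
    #   schema.propertyKey('property-v1-1').Int().create();
    #   schema.vertexLabel('vertex1').partitionKey('pkid').properties('property-v1-1').create();
    #
    # CoreGraphSchema (types lose their parentheses via sanitize_type):
    #   schema.vertexLabel('vertex1').partitionBy('pkid', Int)
    #       .property('property-v1-1', Int)
    #       .create();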
diff --git a/tests/integration/advanced/graph/fluent/__init__.py b/tests/integration/advanced/graph/fluent/__init__.py
deleted file mode 100644
index 155de026c5..0000000000
--- a/tests/integration/advanced/graph/fluent/__init__.py
+++ /dev/null
@@ -1,718 +0,0 @@
-# Copyright DataStax, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import datetime
-import time
-from collections import namedtuple
-from packaging.version import Version
-
-from cassandra.datastax.graph.fluent import DseGraph
-from cassandra.graph import VertexProperty, GraphProtocol
-from cassandra.util import Point, Polygon, LineString
-
-from gremlin_python.process.graph_traversal import GraphTraversal, GraphTraversalSource
-from gremlin_python.process.traversal import P
-from gremlin_python.structure.graph import Edge as TravEdge
-from gremlin_python.structure.graph import Vertex as TravVertex, VertexProperty as TravVertexProperty
-
-from tests.util import wait_until_not_raised
-from tests.integration import DSE_VERSION
-from tests.integration.advanced.graph import (
-    GraphUnitTestCase, ClassicGraphSchema, CoreGraphSchema,
-    VertexLabel)
-from tests.integration import requiredse
-
-import unittest
-
-import ipaddress
-
-
-def check_equality_base(testcase, original, read_value):
-    if isinstance(original, float):
-        testcase.assertAlmostEqual(original, read_value, delta=.01)
-    elif isinstance(original, ipaddress.IPv4Address):
-        testcase.assertEqual(original, ipaddress.IPv4Address(read_value))
-    elif isinstance(original, ipaddress.IPv6Address):
-        testcase.assertEqual(original, ipaddress.IPv6Address(read_value))
-    else:
-        testcase.assertEqual(original, read_value)
-
-
-def create_traversal_profiles(cluster, graph_name):
-    ep_graphson2 = DseGraph().create_execution_profile(
-        graph_name, graph_protocol=GraphProtocol.GRAPHSON_2_0)
-    ep_graphson3 = DseGraph().create_execution_profile(
-        graph_name, graph_protocol=GraphProtocol.GRAPHSON_3_0)
-
-    cluster.add_execution_profile('traversal_graphson2', ep_graphson2)
-    cluster.add_execution_profile('traversal_graphson3', ep_graphson3)
-
-    return ep_graphson2, ep_graphson3
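create_traversal_profiles registers the two profiles under fixed names, so a test can build a traversal source either from the profile object or from its registered name. A minimal usage sketch (the graph name is hypothetical):

    ep2, ep3 = create_traversal_profiles(cluster, 'my_graph')
    # by registered name...
    g = DseGraph().traversal_source(session, 'my_graph', execution_profile='traversal_graphson3')
    # ...or by profile object
    g = DseGraph().traversal_source(session, 'my_graph', execution_profile=ep3)
    names = g.V().has('name', 'marko').out('knows').values('name').toList()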
-
-
-class _AbstractTraversalTest(GraphUnitTestCase):
-
-    def setUp(self):
-        super(_AbstractTraversalTest, self).setUp()
-        self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name)
-
-    def _test_basic_query(self, schema, graphson):
-        """
-        Test to validate that basic graph queries work.
-
-        Creates a simple classic TinkerPop graph, and attempts to perform a basic query
-        using TinkerPop's GLV with both explicit and implicit execution,
-        ensuring that each one is correct. See reference graph here:
-        http://www.tinkerpop.com/docs/3.0.0.M1/
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-        @expected_result graph should generate and all vertex and edge results should be correct
-
-        @test_category dse graph
-        """
-
-        g = self.fetch_traversal_source(graphson)
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        traversal = g.V().has('name', 'marko').out('knows').values('name')
-        results_list = self.execute_traversal(traversal, graphson)
-        self.assertEqual(len(results_list), 2)
-        self.assertIn('vadas', results_list)
-        self.assertIn('josh', results_list)
-
-    def _test_classic_graph(self, schema, graphson):
-        """
-        Test to validate that basic graph generation works, and that vertices and edges are surfaced correctly.
-
-        Creates a simple classic TinkerPop graph, and iterates over the vertices and edges
-        using TinkerPop's GLV with both explicit and implicit execution,
-        ensuring that each one is correct. See reference graph here:
-        http://www.tinkerpop.com/docs/3.0.0.M1/
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-        @expected_result graph should generate and all vertex and edge results should be correct
-
-        @test_category dse graph
-        """
-
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        ep = self.get_execution_profile(graphson)
-        g = self.fetch_traversal_source(graphson)
-        traversal = g.V()
-        vert_list = self.execute_traversal(traversal, graphson)
-
-        for vertex in vert_list:
-            schema.ensure_properties(self.session, vertex, execution_profile=ep)
-            self._validate_classic_vertex(g, vertex)
-        traversal = g.E()
-        edge_list = self.execute_traversal(traversal, graphson)
-        for edge in edge_list:
-            schema.ensure_properties(self.session, edge, execution_profile=ep)
-            self._validate_classic_edge(g, edge)
-
-    def _test_graph_classic_path(self, schema, graphson):
-        """
-        Test to validate that the path version of the result type is generated correctly. It also
-        tests basic path results, as that is not covered elsewhere.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-        @expected_result path object should be unpacked correctly including all nested edges and vertices
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        g = self.fetch_traversal_source(graphson)
-        traversal = g.V().hasLabel('person').has('name', 'marko').as_('a').outE('knows').inV().as_('c', 'd').outE('created').as_('e', 'f', 'g').inV().path()
-        path_list = self.execute_traversal(traversal, graphson)
-        self.assertEqual(len(path_list), 2)
-        for path in path_list:
-            self._validate_path_result_type(g, path)
-
-    def _test_range_query(self, schema, graphson):
-        """
-        Test to validate that range queries are handled correctly.
-
-        Creates a very large line graph script and executes it. Then proceeds to do a range-
-        limited query against it, and ensures that the results are formatted correctly and that
-        the result set is properly sized.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-        @expected_result result set should be properly formatted and properly sized
-
-        @test_category dse graph
-        """
-
-        self.execute_graph(schema.fixtures.line(150), graphson)
-        ep = self.get_execution_profile(graphson)
-        g = self.fetch_traversal_source(graphson)
-
-        traversal = g.E().range(0, 10)
-        edges = self.execute_traversal(traversal, graphson)
-        self.assertEqual(len(edges), 10)
-        for edge in edges:
-            schema.ensure_properties(self.session, edge, execution_profile=ep)
-            self._validate_line_edge(g, edge)
-
-    def _test_result_types(self, schema, graphson):
-        """
-        Test to validate that the edge and vertex versions of results are constructed correctly.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-        @expected_result edge/vertex result types should be unpacked correctly.
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.line(150), graphson)
-        g = self.fetch_traversal_source(graphson)
-        traversal = g.V()
-        vertices = self.execute_traversal(traversal, graphson)
-        for vertex in vertices:
-            self._validate_type(g, vertex)
-
-    def _test_large_result_set(self, schema, graphson):
-        """
-        Test to validate that large result sets return correctly.
-
-        Creates a very large graph. Ensures that large result sets are handled appropriately.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-        @expected_result when limits of result sets are hit, errors should be surfaced appropriately
-
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.large(), graphson)
-        g = self.fetch_traversal_source(graphson)
-        traversal = g.V()
-        vertices = self.execute_traversal(traversal, graphson)
-        for vertex in vertices:
-            self._validate_generic_vertex_result_type(g, vertex)
-
-    def _test_vertex_meta_properties(self, schema, graphson):
-        """
-        Test verifying vertex meta-properties (properties of vertex properties).
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-
-        @test_category dse graph
-        """
-        if schema is not ClassicGraphSchema:
-            raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs')
-
-        s = self.session
-        s.execute_graph("schema.propertyKey('k0').Text().ifNotExists().create();")
-        s.execute_graph("schema.propertyKey('k1').Text().ifNotExists().create();")
-        s.execute_graph("schema.propertyKey('key').Text().properties('k0', 'k1').ifNotExists().create();")
-        s.execute_graph("schema.vertexLabel('MLP').properties('key').ifNotExists().create();")
-        s.execute_graph("schema.config().option('graph.allow_scan').set('true');")
-        v = s.execute_graph('''v = graph.addVertex('MLP')
-                               v.property('key', 'meta_prop', 'k0', 'v0', 'k1', 'v1')
-                               v''')[0]
-
-        g = self.fetch_traversal_source(graphson)
-
-        traversal = g.V()
-        # The result should contain key and value, where the value is a vertex
-        # property that itself carries sub-properties.
-        results = self.execute_traversal(traversal, graphson)
-        self._validate_meta_property(g, results[0])
-
-    def _test_vertex_multiple_properties(self, schema, graphson):
-        """
-        Test verifying vertex property form for various cardinalities.
-
-        All key types are encoded as a list, regardless of cardinality.
-
-        Single cardinality properties have only one value -- the last one added.
-
-        Default is single (this is config dependent).
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-
-        @test_category dse graph
-        """
-        if schema is not ClassicGraphSchema:
-            raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs')
-
-        s = self.session
-        s.execute_graph('''Schema schema = graph.schema();
-                           schema.propertyKey('mult_key').Text().multiple().ifNotExists().create();
-                           schema.propertyKey('single_key').Text().single().ifNotExists().create();
-                           schema.vertexLabel('MPW1').properties('mult_key').ifNotExists().create();
-                           schema.vertexLabel('MPW2').properties('mult_key').ifNotExists().create();
-                           schema.vertexLabel('SW1').properties('single_key').ifNotExists().create();''')
-
-        mpw1v = s.execute_graph('''v = graph.addVertex('MPW1')
-                                   v.property('mult_key', 'value')
-                                   v''')[0]
-
-        mpw2v = s.execute_graph('''g.addV('MPW2').property('mult_key', 'value0').property('mult_key', 'value1')''')[0]
-
-        g = self.fetch_traversal_source(graphson)
-        traversal = g.V(mpw1v.id).properties()
-
-        vertex_props = self.execute_traversal(traversal, graphson)
-
-        self.assertEqual(len(vertex_props), 1)
-
-        self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), "mult_key")
-        self.assertEqual(vertex_props[0].value, "value")
-
-        # multiple cardinality with two values
-        traversal = g.V(mpw2v.id).properties()
-
-        vertex_props = self.execute_traversal(traversal, graphson)
-
-        self.assertEqual(len(vertex_props), 2)
-        self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), 'mult_key')
-        self.assertEqual(self.fetch_key_from_prop(vertex_props[1]), 'mult_key')
-        self.assertEqual(vertex_props[0].value, 'value0')
-        self.assertEqual(vertex_props[1].value, 'value1')
-
-        # single cardinality with one value
-        v = s.execute_graph('''v = graph.addVertex('SW1')
-                               v.property('single_key', 'value')
-                               v''')[0]
-        traversal = g.V(v.id).properties()
-        vertex_props = self.execute_traversal(traversal, graphson)
-        self.assertEqual(len(vertex_props), 1)
-        self.assertEqual(self.fetch_key_from_prop(vertex_props[0]), "single_key")
-        self.assertEqual(vertex_props[0].value, "value")
-
-    def should_parse_meta_properties(self):
-        g = self.fetch_traversal_source()
-        g.addV("meta_v").property("meta_prop", "hello", "sub_prop", "hi", "sub_prop2", "hi2")
-
-    def _test_all_graph_types_with_schema(self, schema, graphson):
-        """
-        Exhaustively goes through each type that is supported by dse_graph.
-        Creates a vertex for each type using a DSE TinkerPop traversal,
-        then attempts to fetch it from the server and compares it to what was inserted.
-        Primes the graph with the correct schema first.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-        @expected_result inserted objects are equivalent to those retrieved
-
-        @test_category dse graph
-        """
-        self._write_and_read_data_types(schema, graphson)
-
-    def _test_all_graph_types_without_schema(self, schema, graphson):
-        """
-        Exhaustively goes through each type that is supported by dse_graph.
-        Creates a vertex for each type using a DSE TinkerPop traversal,
-        then attempts to fetch it from the server and compares it to what was inserted.
-        Does not prime the graph with the correct schema first.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-641
-        @expected_result inserted objects are equivalent to those retrieved
-        @test_category dse graph
-        """
-        if schema is not ClassicGraphSchema:
-            raise unittest.SkipTest('schema-less is only for classic graphs')
-        self._write_and_read_data_types(schema, graphson, use_schema=False)
-
-    def _test_dsl(self, schema, graphson):
-        """
-        The test creates a SocialTraversal and a SocialTraversalSource as part of
-        a DSL. It then calls their methods and checks the results to verify
-        we have the expected results.
-
-        @since 1.1.0a1
-        @jira_ticket PYTHON-790
-        @expected_result only the vertex corresponding to marko is in the result
-
-        @test_category dse graph
-        """
-        class SocialTraversal(GraphTraversal):
-            def knows(self, person_name):
-                return self.out("knows").hasLabel("person").has("name", person_name).in_()
-
-        class SocialTraversalSource(GraphTraversalSource):
-            def __init__(self, *args, **kwargs):
-                super(SocialTraversalSource, self).__init__(*args, **kwargs)
-                self.graph_traversal = SocialTraversal
-
-            def people(self, *names):
-                return self.get_graph_traversal().V().has("name", P.within(*names))
-
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        if schema is CoreGraphSchema:
-            self.execute_graph("""
-            schema.edgeLabel('knows').from('person').to('person').materializedView('person__knows__person_by_in_name').
-                ifNotExists().partitionBy('in_name').clusterBy('out_name', Asc).create()
-            """, graphson)
-            time.sleep(1)  # give the materialized view some time to be populated
-        g = self.fetch_traversal_source(graphson, traversal_class=SocialTraversalSource)
-
-        traversal = g.people("marko", "albert").knows("vadas")
-        results = self.execute_traversal(traversal, graphson)
-        self.assertEqual(len(results), 1)
-        only_vertex = results[0]
-        schema.ensure_properties(self.session, only_vertex,
-                                 execution_profile=self.get_execution_profile(graphson))
-        self._validate_classic_vertex(g, only_vertex)
-
-    def _test_bulked_results(self, schema, graphson):
-        """
-        Sends a query expecting a bulked result; the driver "undoes"
-        the bulk and returns the expanded list.
-
-        @since 1.1.0a1
-        @jira_ticket PYTHON-771
-        @expected_result the expanded list
-
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        g = self.fetch_traversal_source(graphson)
-        barrier_traversal = g.E().label().barrier()
-        results = self.execute_traversal(barrier_traversal, graphson)
-        self.assertEqual(sorted(["created", "created", "created", "created", "knows", "knows"]), sorted(results))
-
-    def _test_udt_with_classes(self, schema, graphson):
-        class Address(object):
-
-            def __init__(self, address, city, state):
-                self.address = address
-                self.city = city
-                self.state = state
-
-            def __eq__(self, other):
-                return self.address == other.address and self.city == other.city and self.state == other.state
-
-        class AddressWithTags(object):
-
-            def __init__(self, address, city, state, tags):
-                self.address = address
-                self.city = city
-                self.state = state
-                self.tags = tags
-
-            def __eq__(self, other):
-                return (self.address == other.address and self.city == other.city
-                        and self.state == other.state and self.tags == other.tags)
-
-        class ComplexAddress(object):
-
-            def __init__(self, address, address_tags, city, state, props):
-                self.address = address
-                self.address_tags = address_tags
-                self.city = city
-                self.state = state
-                self.props = props
-
-            def __eq__(self, other):
-                return (self.address == other.address and self.address_tags == other.address_tags
-                        and self.city == other.city and self.state == other.state
-                        and self.props == other.props)
-
-        class ComplexAddressWithOwners(object):
-
-            def __init__(self, address, address_tags, city, state, props, owners):
-                self.address = address
-                self.address_tags = address_tags
-                self.city = city
-                self.state = state
-                self.props = props
-                self.owners = owners
-
-            def __eq__(self, other):
-                return (self.address == other.address and self.address_tags == other.address_tags
-                        and self.city == other.city and self.state == other.state
-                        and self.props == other.props and self.owners == other.owners)
-
-        self.__test_udt(schema, graphson, Address, AddressWithTags, ComplexAddress, ComplexAddressWithOwners)
-
-    def _test_udt_with_namedtuples(self, schema, graphson):
-        AddressTuple = namedtuple('Address', ('address', 'city', 'state'))
-        AddressWithTagsTuple = namedtuple('AddressWithTags', ('address', 'city', 'state', 'tags'))
-        ComplexAddressTuple = namedtuple('ComplexAddress', ('address', 'address_tags', 'city', 'state', 'props'))
-        ComplexAddressWithOwnersTuple = namedtuple('ComplexAddressWithOwners', ('address', 'address_tags', 'city',
-                                                                                'state', 'props', 'owners'))
-
-        self.__test_udt(schema, graphson, AddressTuple, AddressWithTagsTuple,
-                        ComplexAddressTuple, ComplexAddressWithOwnersTuple)
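Both the hand-written classes and the namedtuples satisfy register_user_type, since the driver only needs attribute access by field name. A minimal sketch (graph name hypothetical):

    from collections import namedtuple

    Address = namedtuple('Address', ('address', 'city', 'state'))
    cluster.register_user_type('my_graph', 'address', Address)
    # 'address' UDT values now surface as Address instances on reads,
    # and Address instances are accepted as 'address' values on writes.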
-
-    def _write_and_read_data_types(self, schema, graphson, use_schema=True):
-        g = self.fetch_traversal_source(graphson)
-        ep = self.get_execution_profile(graphson)
-        for data in schema.fixtures.datatypes().values():
-            typ, value, deserializer = data
-            vertex_label = VertexLabel([typ])
-            property_name = next(iter(vertex_label.non_pk_properties.keys()))
-            if use_schema or schema is CoreGraphSchema:
-                schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
-
-            write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id).\
-                property(property_name, value)
-            self.execute_traversal(write_traversal, graphson)
-
-            read_traversal = g.V().hasLabel(str(vertex_label.label)).has(property_name).properties()
-            results = self.execute_traversal(read_traversal, graphson)
-
-            for result in results:
-                if result.label == 'pkid':
-                    continue
-                self._check_equality(g, value, result.value)
-
-    def __test_udt(self, schema, graphson, address_class, address_with_tags_class,
-                   complex_address_class, complex_address_with_owners_class):
-        if schema is not CoreGraphSchema or DSE_VERSION < Version('6.8'):
-            raise unittest.SkipTest("Graph UDTs are only supported with DSE 6.8+ and core graphs.")
-
-        ep = self.get_execution_profile(graphson)
-
-        Address = address_class
-        AddressWithTags = address_with_tags_class
-        ComplexAddress = complex_address_class
-        ComplexAddressWithOwners = complex_address_with_owners_class
-
-        # set up the UDTs
-        self.session.execute_graph("""
-        schema.type('address').property('address', Text).property('city', Text).property('state', Text).create();
-        schema.type('addressTags').property('address', Text).property('city', Text).property('state', Text).
-            property('tags', setOf(Text)).create();
-        schema.type('complexAddress').property('address', Text).property('address_tags', frozen(typeOf('addressTags'))).
-            property('city', Text).property('state', Text).property('props', mapOf(Text, Int)).create();
-        schema.type('complexAddressWithOwners').property('address', Text).
-            property('address_tags', frozen(typeOf('addressTags'))).
-            property('city', Text).property('state', Text).property('props', mapOf(Text, Int)).
-            property('owners', frozen(listOf(tupleOf(Text, Int)))).create();
-        """, execution_profile=ep)
-
-        # wait up to 10 seconds for the UDTs to be discovered
-        wait_until_not_raised(
-            lambda: self.session.cluster.register_user_type(self.graph_name, 'address', Address),
-            1, 10)
-        wait_until_not_raised(
-            lambda: self.session.cluster.register_user_type(self.graph_name, 'addressTags', AddressWithTags),
-            1, 10)
-        wait_until_not_raised(
-            lambda: self.session.cluster.register_user_type(self.graph_name, 'complexAddress', ComplexAddress),
-            1, 10)
-        wait_until_not_raised(
-            lambda: self.session.cluster.register_user_type(self.graph_name, 'complexAddressWithOwners', ComplexAddressWithOwners),
-            1, 10)
-
-        data = {
-            "udt1": ["typeOf('address')", Address('1440 Rd Smith', 'Quebec', 'QC')],
-            "udt2": ["tupleOf(typeOf('address'), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')],
-            "udt3": ["tupleOf(frozen(typeOf('address')), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')],
-            "udt4": ["tupleOf(tupleOf(Int, typeOf('address')), Text)",
-                     ((42, Address('1440 Rd Smith', 'Quebec', 'QC')), 'hello')],
-            "udt5": ["tupleOf(tupleOf(Int, typeOf('addressTags')), Text)",
-                     ((42, AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'})), 'hello')],
-            "udt6": ["tupleOf(tupleOf(Int, typeOf('complexAddress')), Text)",
-                     ((42, ComplexAddress('1440 Rd Smith',
                                          AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'}),
                                          'Quebec', 'QC', {'p1': 42, 'p2': 33})), 'hello')],
-            "udt7": ["tupleOf(tupleOf(Int, frozen(typeOf('complexAddressWithOwners'))), Text)",
-                     ((42, ComplexAddressWithOwners(
                         '1440 Rd Smith',
                         AddressWithTags('1440 CRd Smith', 'Quebec', 'QC', {'t1', 't2'}),
                         'Quebec', 'QC', {'p1': 42, 'p2': 33}, [('Mike', 43), ('Gina', 39)])
                      ), 'hello')]
-        }
-
-        g = self.fetch_traversal_source(graphson)
-        for typ, value in data.values():
-            vertex_label = VertexLabel([typ])
-            property_name = next(iter(vertex_label.non_pk_properties.keys()))
-            schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
-
-            write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id).\
-                property(property_name, value)
-            self.execute_traversal(write_traversal, graphson)
-
-            read_traversal = g.V().hasLabel(str(vertex_label.label)).has(property_name).properties()
-            vertex_properties = self.execute_traversal(read_traversal, graphson)
-
-            self.assertEqual(len(vertex_properties), 2)  # includes pkid
-            for vp in vertex_properties:
-                if vp.label == 'pkid':
-                    continue
-
-                self.assertIsInstance(vp, (VertexProperty, TravVertexProperty))
-                self.assertEqual(vp.label, property_name)
-                self.assertEqual(vp.value, value)
-
-    @staticmethod
-    def fetch_edge_props(g, edge):
-        edge_props = g.E(edge.id).properties().toList()
-        return edge_props
-
-    @staticmethod
-    def fetch_vertex_props(g, vertex):
-        vertex_props = g.V(vertex.id).properties().toList()
-        return vertex_props
-
-    def _check_equality(self, g, original, read_value):
-        return check_equality_base(self, original, read_value)
-
-
-def _validate_prop(key, value, unittest):
-    # Maps the property-key prefix to the expected Python type;
-    # unknown prefixes fail the test.
-    if key == 'index':
-        return
-
-    if any(key.startswith(t) for t in ('int', 'short')):
-        typ = int
-    elif any(key.startswith(t) for t in ('long',)):
-        if sys.version_info >= (3, 0):
-            typ = int
-        else:
-            typ = long
-    elif any(key.startswith(t) for t in ('float', 'double')):
-        typ = float
-    elif any(key.startswith(t) for t in ('polygon',)):
-        typ = Polygon
-    elif any(key.startswith(t) for t in ('point',)):
-        typ = Point
-    elif any(key.startswith(t) for t in ('linestring',)):
-        typ = LineString
-    elif any(key.startswith(t) for t in ('neg',)):
-        typ = str
-    elif any(key.startswith(t) for t in ('date',)):
-        typ = datetime.date
-    elif any(key.startswith(t) for t in ('time',)):
-        typ = datetime.time
-    else:
-        unittest.fail("Received unexpected type: %s" % key)
-
-
-@requiredse
-class BaseImplicitExecutionTest(GraphUnitTestCase):
-    """
-    This test class will execute all tests of the AbstractTraversalTestClass using implicit execution.
-    All traversals will be run directly using toList().
-    """
-    def setUp(self):
-        super(BaseImplicitExecutionTest, self).setUp()
-        if DSE_VERSION:
-            self.ep = DseGraph().create_execution_profile(self.graph_name)
-            self.cluster.add_execution_profile(self.graph_name, self.ep)
-
-    @staticmethod
-    def fetch_key_from_prop(property):
-        return property.key
-
-    def fetch_traversal_source(self, graphson, **kwargs):
-        ep = self.get_execution_profile(graphson, traversal=True)
-        return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep, **kwargs)
-
-    def execute_traversal(self, traversal, graphson=None):
-        return traversal.toList()
-
-    def _validate_classic_vertex(self, g, vertex):
-        # Checks the properties on a classic vertex for correctness
-        vertex_props = self.fetch_vertex_props(g, vertex)
-        vertex_prop_keys = [vp.key for vp in vertex_props]
-        self.assertEqual(len(vertex_prop_keys), 2)
-        self.assertIn('name', vertex_prop_keys)
-        self.assertTrue('lang' in vertex_prop_keys or 'age' in vertex_prop_keys)
-
-    def _validate_generic_vertex_result_type(self, g, vertex):
-        # Checks a vertex object for its generic properties
-        properties = self.fetch_vertex_props(g, vertex)
-        for attr in ('id', 'label'):
-            self.assertIsNotNone(getattr(vertex, attr))
-        self.assertTrue(len(properties) > 2)
-
-    def _validate_classic_edge_properties(self, g, edge):
-        # Checks the properties on a classic edge for correctness
-        edge_props = self.fetch_edge_props(g, edge)
-        edge_prop_keys = [ep.key for ep in edge_props]
-        self.assertEqual(len(edge_prop_keys), 1)
-        self.assertIn('weight', edge_prop_keys)
-
-    def _validate_classic_edge(self, g, edge):
-        self._validate_generic_edge_result_type(edge)
-        self._validate_classic_edge_properties(g, edge)
-
-    def _validate_line_edge(self, g, edge):
-        self._validate_generic_edge_result_type(edge)
-        edge_props = self.fetch_edge_props(g, edge)
-        edge_prop_keys = [ep.key for ep in edge_props]
-        self.assertEqual(len(edge_prop_keys), 1)
-        self.assertIn('distance', edge_prop_keys)
-
-    def _validate_generic_edge_result_type(self, edge):
-        self.assertIsInstance(edge, TravEdge)
-
-        for attr in ('outV', 'inV', 'label', 'id'):
-            self.assertIsNotNone(getattr(edge, attr))
-
-    def _validate_path_result_type(self, g, objects_path):
-        for obj in objects_path:
-            if isinstance(obj, TravEdge):
-                self._validate_classic_edge(g, obj)
-            elif isinstance(obj, TravVertex):
-                self._validate_classic_vertex(g, obj)
-            else:
-                self.fail("Invalid object found in path " + str(obj))
-
-    def _validate_meta_property(self, g, vertex):
-        meta_props = g.V(vertex.id).properties().toList()
-        self.assertEqual(len(meta_props), 1)
-        meta_prop = meta_props[0]
-        self.assertEqual(meta_prop.value, "meta_prop")
-        self.assertEqual(meta_prop.key, "key")
-
-        nested_props = g.V(vertex.id).properties().properties().toList()
-        self.assertEqual(len(nested_props), 2)
-        for nested_prop in nested_props:
-            self.assertTrue(nested_prop.key in ['k0', 'k1'])
-            self.assertTrue(nested_prop.value in ['v0', 'v1'])
-
-    def _validate_type(self, g, vertex):
-        props = self.fetch_vertex_props(g, vertex)
-        for prop in props:
-            value = prop.value
-            key = prop.key
-            _validate_prop(key, value, self)
-
-
-class BaseExplicitExecutionTest(GraphUnitTestCase):
-
-    def fetch_traversal_source(self, graphson, **kwargs):
-        ep = self.get_execution_profile(graphson, traversal=True)
-        return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep, **kwargs)
-
-    def execute_traversal(self, traversal, graphson):
-        ep = self.get_execution_profile(graphson, traversal=True)
-        ep = self.session.get_execution_profile(ep)
-        context = None
-        if graphson == GraphProtocol.GRAPHSON_3_0:
-            context = {
-                'cluster': self.cluster,
-                'graph_name': ep.graph_options.graph_name.decode('utf-8') if ep.graph_options.graph_name else None
-            }
-        query = DseGraph.query_from_traversal(traversal, graphson, context=context)
-        # Use an ep that is configured with the correct row factory and the bytecode-json graph language set
-        result_set = self.execute_graph(query, graphson, traversal=True)
-        return list(result_set)
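Stripped of the test plumbing, explicit execution boils down to compiling the GLV traversal to bytecode JSON and submitting it through execute_graph with a profile whose row factory matches the GraphSON version. A sketch under the same assumptions (graph and profile names are hypothetical):

    ep = DseGraph().create_execution_profile('my_graph', graph_protocol=GraphProtocol.GRAPHSON_3_0)
    cluster.add_execution_profile('traversal_ep', ep)

    g = DseGraph().traversal_source(session, 'my_graph', execution_profile='traversal_ep')
    query = DseGraph.query_from_traversal(
        g.V().has('name', 'marko'),
        GraphProtocol.GRAPHSON_3_0,
        context={'cluster': cluster, 'graph_name': 'my_graph'})
    results = list(session.execute_graph(query, execution_profile='traversal_ep'))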
diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py
deleted file mode 100644
index 911e6d5d57..0000000000
--- a/tests/integration/advanced/graph/fluent/test_graph.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright DataStax, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from cassandra import cluster
-from cassandra.cluster import ContinuousPagingOptions
-from cassandra.datastax.graph.fluent import DseGraph
-from cassandra.graph import VertexProperty
-
-from tests.integration import greaterthanorequaldse68
-from tests.integration.advanced.graph import (
-    GraphUnitTestCase, ClassicGraphSchema, CoreGraphSchema,
-    VertexLabel, GraphTestConfiguration
-)
-from tests.integration import greaterthanorequaldse60
-from tests.integration.advanced.graph.fluent import (
-    BaseExplicitExecutionTest, create_traversal_profiles, check_equality_base)
-
-import unittest
-
-
-@greaterthanorequaldse60
-@GraphTestConfiguration.generate_tests(traversal=True)
-class BatchStatementTests(BaseExplicitExecutionTest):
-
-    def setUp(self):
-        super(BatchStatementTests, self).setUp()
-        self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name)
-
-    def _test_batch_with_schema(self, schema, graphson):
-        """
-        Sends a batch statement and verifies it has succeeded with a schema created.
-
-        @since 1.1.0
-        @jira_ticket PYTHON-789
-        @expected_result the batch executes and the inserted data is readable
-
-        @test_category dse graph
-        """
-        self._send_batch_and_read_results(schema, graphson)
-
-    def _test_batch_without_schema(self, schema, graphson):
-        """
-        Sends a batch statement and verifies it has succeeded without a schema created.
-
-        @since 1.1.0
-        @jira_ticket PYTHON-789
-        @expected_result the batch executes and the inserted data is readable
-
-        @test_category dse graph
-        """
-        if schema is not ClassicGraphSchema:
-            raise unittest.SkipTest('schema-less is only for classic graphs')
-        self._send_batch_and_read_results(schema, graphson, use_schema=False)
-
-    def _test_batch_with_schema_add_all(self, schema, graphson):
-        """
-        Sends a batch statement and verifies it has succeeded with a schema created.
-        Uses :method:`dse_graph.query._BatchGraphStatement.add_all` to add the statements
-        instead of :method:`dse_graph.query._BatchGraphStatement.add`.
-
-        @since 1.1.0
-        @jira_ticket PYTHON-789
-        @expected_result the batch executes and the inserted data is readable
-
-        @test_category dse graph
-        """
-        self._send_batch_and_read_results(schema, graphson, add_all=True)
-
-    def _test_batch_without_schema_add_all(self, schema, graphson):
-        """
-        Sends a batch statement and verifies it has succeeded without a schema created.
-        Uses :method:`dse_graph.query._BatchGraphStatement.add_all` to add the statements
-        instead of :method:`dse_graph.query._BatchGraphStatement.add`.
-
-        @since 1.1.0
-        @jira_ticket PYTHON-789
-        @expected_result the batch executes and the inserted data is readable
-
-        @test_category dse graph
-        """
-        if schema is not ClassicGraphSchema:
-            raise unittest.SkipTest('schema-less is only for classic graphs')
-        self._send_batch_and_read_results(schema, graphson, add_all=True, use_schema=False)
-
-    def test_only_graph_traversals_are_accepted(self):
-        """
-        Verifies that a ValueError is raised if the parameter passed to add is not a traversal.
-
-        @since 1.1.0
-        @jira_ticket PYTHON-789
-        @expected_result ValueError is raised
-
-        @test_category dse graph
-        """
-        batch = DseGraph.batch()
-        self.assertRaises(ValueError, batch.add, '{"@value":{"step":[["addV","poc_int"],'
-                                                 '["property","bigint1value",{"@value":12,"@type":"g:Int32"}]]},'
-                                                 '"@type":"g:Bytecode"}')
-        another_batch = DseGraph.batch()
-        self.assertRaises(ValueError, batch.add, another_batch)
-
-    def _send_batch_and_read_results(self, schema, graphson, add_all=False, use_schema=True):
-        traversals = []
-        datatypes = schema.fixtures.datatypes()
-        values = {}
-        g = self.fetch_traversal_source(graphson)
-        ep = self.get_execution_profile(graphson)
-        batch = DseGraph.batch(session=self.session,
-                               execution_profile=self.get_execution_profile(graphson, traversal=True))
-        for data in datatypes.values():
-            typ, value, deserializer = data
-            vertex_label = VertexLabel([typ])
-            property_name = next(iter(vertex_label.non_pk_properties.keys()))
-            values[property_name] = value
-            if use_schema or schema is CoreGraphSchema:
-                schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
-
-            traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id).property(property_name, value)
-            if not add_all:
-                batch.add(traversal)
-            traversals.append(traversal)
-
-        if add_all:
-            batch.add_all(traversals)
-
-        self.assertEqual(len(datatypes), len(batch))
-
-        batch.execute()
-
-        vertices = self.execute_traversal(g.V(), graphson)
-        self.assertEqual(len(vertices), len(datatypes), "g.V() returned {}".format(vertices))
-
-        # Iterate over all the vertices and check that they match the original input
-        for vertex in vertices:
-            schema.ensure_properties(self.session, vertex, execution_profile=ep)
-            key = [k for k in list(vertex.properties.keys()) if k != 'pkid'][0].replace("value", "")
-            original = values[key]
-            self._check_equality(original, vertex)
-
-    def _check_equality(self, original, vertex):
-        for key in vertex.properties:
-            if key == 'pkid':
-                continue
-            value = vertex.properties[key].value \
-                if isinstance(vertex.properties[key], VertexProperty) else vertex.properties[key][0].value
-            check_equality_base(self, original, value)
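The batch API accepts only GLV traversals, which is what the ValueError test above pins down. A minimal round-trip sketch (profile name hypothetical, g built as in the tests):

    batch = DseGraph.batch(session=session, execution_profile='traversal_graphson3')
    batch.add(g.addV('person').property('pkid', 1).property('name', 'alice'))
    batch.add_all([g.addV('person').property('pkid', 2).property('name', 'bob')])
    assert len(batch) == 2
    batch.execute()  # all traversals go out in a single server round trip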
-
-
-class ContinuousPagingOptionsForTests(ContinuousPagingOptions):
-    def __init__(self,
-                 page_unit=ContinuousPagingOptions.PagingUnit.ROWS, max_pages=1,  # max_pages=1
-                 max_pages_per_second=0, max_queue_size=4):
-        super(ContinuousPagingOptionsForTests, self).__init__(page_unit, max_pages, max_pages_per_second,
-                                                              max_queue_size)
-
-
-def reset_paging_options():
-    cluster.ContinuousPagingOptions = ContinuousPagingOptions
-
-
-@greaterthanorequaldse68
-@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema)
-class GraphPagingTest(GraphUnitTestCase):
-
-    def setUp(self):
-        super(GraphPagingTest, self).setUp()
-        self.addCleanup(reset_paging_options)
-        self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name)
-
-    def _setup_data(self, schema, graphson):
-        self.execute_graph(
-            "schema.vertexLabel('person').ifNotExists().partitionBy('name', Text).property('age', Int).create();",
-            graphson)
-        for i in range(100):
-            self.execute_graph("g.addV('person').property('name', 'batman-{}')".format(i), graphson)
-
-    def _test_cont_paging_is_enabled_by_default(self, schema, graphson):
-        """
-        Test that graph paging is automatically enabled with a >=6.8 cluster.
-
-        @jira_ticket PYTHON-1045
-        @expected_result the default continuous paging options are used
-
-        @test_category dse graph
-        """
-        # With traversals we don't have access to the response future, so this is a hack to ensure paging is on
-        cluster.ContinuousPagingOptions = ContinuousPagingOptionsForTests
-        ep = self.get_execution_profile(graphson, traversal=True)
-        self._setup_data(schema, graphson)
-        self.session.default_fetch_size = 10
-        g = DseGraph.traversal_source(self.session, execution_profile=ep)
-        results = g.V().toList()
-        self.assertEqual(len(results), 10)  # only 10 results due to our hack
-
-    def _test_cont_paging_can_be_disabled(self, schema, graphson):
-        """
-        Test that graph paging can be disabled.
-
-        @jira_ticket PYTHON-1045
-        @expected_result the default continuous paging options are not used
-
-        @test_category dse graph
-        """
-        # With traversals we don't have access to the response future, so this is a hack to ensure paging is on
-        cluster.ContinuousPagingOptions = ContinuousPagingOptionsForTests
-        ep = self.get_execution_profile(graphson, traversal=True)
-        ep = self.session.execution_profile_clone_update(ep, continuous_paging_options=None)
-        self._setup_data(schema, graphson)
-        self.session.default_fetch_size = 10
-        g = DseGraph.traversal_source(self.session, execution_profile=ep)
-        results = g.V().toList()
-        self.assertEqual(len(results), 100)  # 100 results since paging is disabled
-
-    def _test_cont_paging_with_custom_options(self, schema, graphson):
-        """
-        Test that we can specify custom paging options.
-
-        @jira_ticket PYTHON-1045
-        @expected_result we get only the desired number of results
-
-        @test_category dse graph
-        """
-        ep = self.get_execution_profile(graphson, traversal=True)
-        ep = self.session.execution_profile_clone_update(ep,
-                                                         continuous_paging_options=ContinuousPagingOptions(max_pages=1))
-        self._setup_data(schema, graphson)
-        self.session.default_fetch_size = 10
-        g = DseGraph.traversal_source(self.session, execution_profile=ep)
-        results = g.V().toList()
-        self.assertEqual(len(results), 10)  # only 10 results since max_pages=1
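Outside of the monkey-patching hack above, the supported way to tune graph paging is to clone the traversal profile with explicit continuous_paging_options, as the custom-options test does. A condensed sketch (profile name hypothetical):

    ep = session.get_execution_profile('traversal_graphson3')
    ep = session.execution_profile_clone_update(
        ep, continuous_paging_options=ContinuousPagingOptions(max_pages=1))
    g = DseGraph.traversal_source(session, execution_profile=ep)
    first_page = g.V().toList()  # at most one page, i.e. session.default_fetch_size rows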
diff --git a/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py b/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py
deleted file mode 100644
index 1a5846203d..0000000000
--- a/tests/integration/advanced/graph/fluent/test_graph_explicit_execution.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright DataStax, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from cassandra.graph import Vertex, Edge
-
-from tests.integration.advanced.graph import (
-    validate_classic_vertex, validate_classic_edge, validate_generic_vertex_result_type,
-    validate_classic_edge_properties, validate_line_edge,
-    validate_generic_edge_result_type, validate_path_result_type)
-
-from tests.integration import requiredse, DSE_VERSION
-from tests.integration.advanced import use_single_node_with_graph
-from tests.integration.advanced.graph import GraphTestConfiguration
-from tests.integration.advanced.graph.fluent import (
-    BaseExplicitExecutionTest, _AbstractTraversalTest, _validate_prop)
-
-
-def setup_module():
-    if DSE_VERSION:
-        dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}}
-        use_single_node_with_graph(dse_options=dse_options)
-
-
-@requiredse
-@GraphTestConfiguration.generate_tests(traversal=True)
-class ExplicitExecutionTest(BaseExplicitExecutionTest, _AbstractTraversalTest):
-    """
-    This test class will execute all tests of the AbstractTraversalTestClass using explicit execution.
-    All queries will be run by converting them to bytecode and calling execute_graph explicitly with a generated ep.
-    """
-    @staticmethod
-    def fetch_key_from_prop(property):
-        return property.label
-
-    def _validate_classic_vertex(self, g, vertex):
-        validate_classic_vertex(self, vertex)
-
-    def _validate_generic_vertex_result_type(self, g, vertex):
-        validate_generic_vertex_result_type(self, vertex)
-
-    def _validate_classic_edge_properties(self, g, edge):
-        validate_classic_edge_properties(self, edge)
-
-    def _validate_classic_edge(self, g, edge):
-        validate_classic_edge(self, edge)
-
-    def _validate_line_edge(self, g, edge):
-        validate_line_edge(self, edge)
-
-    def _validate_generic_edge_result_type(self, edge):
-        validate_generic_edge_result_type(self, edge)
-
-    def _validate_type(self, g, vertex):
-        for key in vertex.properties:
-            value = vertex.properties[key][0].value
-            _validate_prop(key, value, self)
-
-    def _validate_path_result_type(self, g, path_obj):
-        # This pre-processing is needed because of a change in TinkerPop:
-        # properties are no longer returned automatically with some queries.
-        for obj in path_obj.objects:
-            if not obj.properties:
-                if isinstance(obj, Edge):
-                    obj.properties = {
-                        p.key: p.value
-                        for p in self.fetch_edge_props(g, obj)
-                    }
-                elif isinstance(obj, Vertex):
-                    obj.properties = {
-                        p.label: p.value
-                        for p in self.fetch_vertex_props(g, obj)
-                    }
-
-        validate_path_result_type(self, path_obj)
-
-    def _validate_meta_property(self, g, vertex):
-        self.assertEqual(len(vertex.properties), 1)
-        self.assertEqual(len(vertex.properties['key']), 1)
-        p = vertex.properties['key'][0]
-        self.assertEqual(p.label, 'key')
-        self.assertEqual(p.value, 'meta_prop')
-        self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'})
diff --git a/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py b/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py
deleted file mode 100644
index 50e6795867..0000000000
--- a/tests/integration/advanced/graph/fluent/test_graph_implicit_execution.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright DataStax, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from concurrent.futures import Future
-from cassandra.datastax.graph.fluent import DseGraph
-
-from tests.integration import requiredse, DSE_VERSION
-from tests.integration.advanced import use_single_node_with_graph
-from tests.integration.advanced.graph import GraphTestConfiguration
-from tests.integration.advanced.graph.fluent import (
-    BaseImplicitExecutionTest, create_traversal_profiles, _AbstractTraversalTest)
-
-
-def setup_module():
-    if DSE_VERSION:
-        dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}}
-        use_single_node_with_graph(dse_options=dse_options)
-
-
-@requiredse
-@GraphTestConfiguration.generate_tests(traversal=True)
-class ImplicitExecutionTest(BaseImplicitExecutionTest, _AbstractTraversalTest):
-    def _test_iterate_step(self, schema, graphson):
-        """
-        Test to validate that the iterate() step works on all DSE versions.
-        @jira_ticket PYTHON-1155
-        @expected_result iterate step works
-        @test_category dse graph
-        """
-
-        g = self.fetch_traversal_source(graphson)
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        g.addV('person').property('name', 'Person1').iterate()
-
-
-@requiredse
-@GraphTestConfiguration.generate_tests(traversal=True)
-class ImplicitAsyncExecutionTest(BaseImplicitExecutionTest):
-    """
-    Test to validate that traversal async execution works properly.
-
-    @since 3.21.0
-    @jira_ticket PYTHON-1129
-
-    @test_category dse graph
-    """
-
-    def setUp(self):
-        super(ImplicitAsyncExecutionTest, self).setUp()
-        self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name)
-
-    def _validate_results(self, results):
-        results = list(results)
-        self.assertEqual(len(results), 2)
-        self.assertIn('vadas', results)
-        self.assertIn('josh', results)
-
-    def _test_promise(self, schema, graphson):
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        g = self.fetch_traversal_source(graphson)
-        traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise()
-        self._validate_results(traversal_future.result())
-
-    def _test_promise_error_is_propagated(self, schema, graphson):
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        g = DseGraph().traversal_source(self.session, 'wrong_graph', execution_profile=self.ep)
-        traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise()
-        with self.assertRaises(Exception):
-            traversal_future.result()
-
-    def _test_promise_callback(self, schema, graphson):
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        g = self.fetch_traversal_source(graphson)
-        future = Future()
-
-        def cb(f):
-            future.set_result(f.result())
-
-        traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise()
-        traversal_future.add_done_callback(cb)
-        self._validate_results(future.result())
-
-    def _test_promise_callback_on_error(self, schema, graphson):
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        g = DseGraph().traversal_source(self.session, 'wrong_graph', execution_profile=self.ep)
-        future = Future()
-
-        def cb(f):
-            try:
-                f.result()
-            except Exception as e:
-                future.set_exception(e)
-
-        traversal_future = g.V().has('name', 'marko').out('knows').values('name').promise()
-        traversal_future.add_done_callback(cb)
-        with self.assertRaises(Exception):
-            future.result()
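promise() returns a future-like object, so the two consumption patterns exercised above mirror concurrent.futures. A condensed sketch (g built as in the tests):

    future = g.V().has('name', 'marko').out('knows').values('name').promise()

    # blocking:
    names = future.result()

    # callback-driven:
    def on_done(f):
        try:
            print(f.result())
        except Exception as exc:
            print("traversal failed: %s" % exc)

    future.add_done_callback(on_done)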
diff --git a/tests/integration/advanced/graph/fluent/test_search.py b/tests/integration/advanced/graph/fluent/test_search.py
deleted file mode 100644
index d50016d576..0000000000
--- a/tests/integration/advanced/graph/fluent/test_search.py
+++ /dev/null
@@ -1,539 +0,0 @@
-# Copyright DataStax, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from cassandra.util import Distance
-from cassandra import InvalidRequest
-from cassandra.graph import GraphProtocol
-from cassandra.datastax.graph.fluent import DseGraph
-from cassandra.datastax.graph.fluent.predicates import Search, Geo, GeoUnit, CqlCollection
-
-from tests.integration.advanced import use_single_node_with_graph_and_solr
-from tests.integration.advanced.graph import GraphUnitTestCase, CoreGraphSchema, ClassicGraphSchema, GraphTestConfiguration
-from tests.integration import greaterthanorequaldse51, DSE_VERSION, requiredse
-
-
-def setup_module():
-    if DSE_VERSION:
-        use_single_node_with_graph_and_solr()
-
-
-class AbstractSearchTest(GraphUnitTestCase):
-
-    def setUp(self):
-        super(AbstractSearchTest, self).setUp()
-        self.ep_graphson2 = DseGraph().create_execution_profile(self.graph_name,
-                                                                graph_protocol=GraphProtocol.GRAPHSON_2_0)
-        self.ep_graphson3 = DseGraph().create_execution_profile(self.graph_name,
-                                                                graph_protocol=GraphProtocol.GRAPHSON_3_0)
-
-        self.cluster.add_execution_profile('traversal_graphson2', self.ep_graphson2)
-        self.cluster.add_execution_profile('traversal_graphson3', self.ep_graphson3)
-
-    def fetch_traversal_source(self, graphson):
-        ep = self.get_execution_profile(graphson, traversal=True)
-        return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep)
-
-    def _test_search_by_prefix(self, schema, graphson):
-        """
-        Test to validate that solr searches by prefix function.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-660
-        @expected_result all names starting with Paul should be returned
-
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.address_book(), graphson)
-        g = self.fetch_traversal_source(graphson)
-        traversal = g.V().has("person", "name", Search.prefix("Paul")).values("name")
-        results_list = self.execute_traversal(traversal, graphson)
-        self.assertEqual(len(results_list), 1)
-        self.assertEqual(results_list[0], "Paul Thomas Joe")
-
-    def _test_search_by_regex(self, schema, graphson):
-        """
-        Test to validate that solr searches by regex function.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-660
-        @expected_result all names containing Paul should be returned
-
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.address_book(), graphson)
-        g = self.fetch_traversal_source(graphson)
-        traversal = g.V().has("person", "name", Search.regex(".*Paul.*")).values("name")
-        results_list = self.execute_traversal(traversal, graphson)
-        self.assertEqual(len(results_list), 2)
-        self.assertIn("Paul Thomas Joe", results_list)
-        self.assertIn("James Paul Smith", results_list)
-
-    def _test_search_by_token(self, schema, graphson):
-        """
-        Test to validate that solr searches by token.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-660
-        @expected_result all names with a description containing the token 'cold' should be returned
-
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.address_book(), graphson)
-        g = self.fetch_traversal_source(graphson)
-        traversal = g.V().has("person", "description", Search.token("cold")).values("name")
-        results_list = self.execute_traversal(traversal, graphson)
-        self.assertEqual(len(results_list), 2)
-        self.assertIn("Jill Alice", results_list)
-        self.assertIn("George Bill Steve", results_list)
- - @since 1.0.0 - @jira_ticket PYTHON-660 - @expected_result all names with description containing a token starting with h are returned - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.address_book(), graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.V().has("person", "description", Search.token_prefix("h")).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 2) - self.assertIn("Paul Thomas Joe", results_list) - self.assertIn( "James Paul Smith", results_list) - - def _test_search_by_token_regex(self, schema, graphson): - """ - Test to validate that solr searches by token regex. - - @since 1.0.0 - @jira_ticket PYTHON-660 - @expected_result all names with description containing nice or hospital are returned - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.address_book(), graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.V().has("person", "description", Search.token_regex("(nice|hospital)")).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 2) - self.assertIn("Paul Thomas Joe", results_list ) - self.assertIn( "Jill Alice", results_list ) - - def _assert_in_distance(self, schema, graphson, inside, names): - """ - Helper function that asserts that an exception is arisen if geodetic predicates are used - in cartesian geometry. Also asserts that the expected list is equal to the returned from - the transversal using different search indexes. - """ - def assert_equal_list(L1, L2): - return len(L1) == len(L2) and sorted(L1) == sorted(L2) - - self.execute_graph(schema.fixtures.address_book(), graphson) - g = self.fetch_traversal_source(graphson) - - traversal = g.V().has("person", "pointPropWithBoundsWithSearchIndex", inside).values("name") - if schema is ClassicGraphSchema: - # throws an exception because of a SOLR/Search limitation in the indexing process - # may be resolved in the future - self.assertRaises(InvalidRequest, self.execute_traversal, traversal, graphson) - else: - traversal = g.V().has("person", "pointPropWithBoundsWithSearchIndex", inside).values("name") - results_list = self.execute_traversal(traversal, graphson) - assert_equal_list(names, results_list) - - traversal = g.V().has("person", "pointPropWithBounds", inside).values("name") - results_list = self.execute_traversal(traversal, graphson) - assert_equal_list(names, results_list) - - traversal = g.V().has("person", "pointPropWithGeoBoundsWithSearchIndex", inside).values("name") - results_list = self.execute_traversal(traversal, graphson) - assert_equal_list(names, results_list) - - traversal = g.V().has("person", "pointPropWithGeoBounds", inside).values("name") - results_list = self.execute_traversal(traversal, graphson) - assert_equal_list(names, results_list) - - @greaterthanorequaldse51 - def _test_search_by_distance(self, schema, graphson): - """ - Test to validate that solr searches by distance. - - @since 1.0.0 - @jira_ticket PYTHON-660 - @expected_result all names with a geo location within a 2 degree distance of -92,44 are returned - - @test_category dse graph - """ - self._assert_in_distance(schema, graphson, - Geo.inside(Distance(-92, 44, 2)), - ["Paul Thomas Joe", "George Bill Steve"] - ) - - @greaterthanorequaldse51 - def _test_search_by_distance_meters_units(self, schema, graphson): - """ - Test to validate that solr searches by distance. 
- - @since 2.0.0 - @jira_ticket PYTHON-698 - @expected_result all names with a geo location within a 56k-meter radius of -92,44 are returned - - @test_category dse graph - """ - self._assert_in_distance(schema, graphson, - Geo.inside(Distance(-92, 44, 56000), GeoUnit.METERS), - ["Paul Thomas Joe"] - ) - - @greaterthanorequaldse51 - def _test_search_by_distance_miles_units(self, schema, graphson): - """ - Test to validate that solr searches by distance. - - @since 2.0.0 - @jira_ticket PYTHON-698 - @expected_result all names with a geo location within a 70-mile radius of -92,44 are returned - - @test_category dse graph - """ - self._assert_in_distance(schema, graphson, - Geo.inside(Distance(-92, 44, 70), GeoUnit.MILES), - ["Paul Thomas Joe", "George Bill Steve"] - ) - - @greaterthanorequaldse51 - def _test_search_by_distance_check_limit(self, schema, graphson): - """ - Test to validate that solr searches by distance using several units. It will also validate - that and exception is arisen if geodetic predicates are used against cartesian geometry - - @since 2.0.0 - @jira_ticket PYTHON-698 - @expected_result if the search distance is below the real distance only one - name will be in the list, otherwise, two - - @test_category dse graph - """ - # Paul Thomas Joe and George Bill Steve are 64.6923761881464 km apart - self._assert_in_distance(schema, graphson, - Geo.inside(Distance(-92.46295, 44.0234, 65), GeoUnit.KILOMETERS), - ["George Bill Steve", "Paul Thomas Joe"] - ) - - self._assert_in_distance(schema, graphson, - Geo.inside(Distance(-92.46295, 44.0234, 64), GeoUnit.KILOMETERS), - ["Paul Thomas Joe"] - ) - - # Paul Thomas Joe and George Bill Steve are 40.19797892069464 miles apart - self._assert_in_distance(schema, graphson, - Geo.inside(Distance(-92.46295, 44.0234, 41), GeoUnit.MILES), - ["George Bill Steve", "Paul Thomas Joe"] - ) - - self._assert_in_distance(schema, graphson, - Geo.inside(Distance(-92.46295, 44.0234, 40), GeoUnit.MILES), - ["Paul Thomas Joe"] - ) - - @greaterthanorequaldse51 - def _test_search_by_fuzzy(self, schema, graphson): - """ - Test to validate that solr searches by distance. - - @since 1.0.0 - @jira_ticket PYTHON-664 - @expected_result all names with a geo location within a 2 radius distance of -92,44 are returned - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.address_book(), graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.V().has("person", "name", Search.fuzzy("Paul Thamas Joe", 1)).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 1) - self.assertIn("Paul Thomas Joe", results_list) - - traversal = g.V().has("person", "name", Search.fuzzy("Paul Thames Joe", 1)).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 0) - - @greaterthanorequaldse51 - def _test_search_by_fuzzy_token(self, schema, graphson): - """ - Test to validate that fuzzy searches. 
- - @since 1.0.0 - @jira_ticket PYTHON-664 - @expected_result all names with that differ from the search criteria by one letter should be returned - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.address_book(), graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.V().has("person", "description", Search.token_fuzzy("lives", 1)).values("name") - # Should match 'Paul Thomas Joe' since description contains 'Lives' - # Should match 'James Paul Joe' since description contains 'Likes' - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 2) - self.assertIn("Paul Thomas Joe", results_list) - self.assertIn("James Paul Smith", results_list) - - traversal = g.V().has("person", "description", Search.token_fuzzy("loues", 1)).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 0) - - @greaterthanorequaldse51 - def _test_search_by_phrase(self, schema, graphson): - """ - Test to validate that phrase searches. - - @since 1.0.0 - @jira_ticket PYTHON-664 - @expected_result all names with that differ from the search phrase criteria by two letter should be returned - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.address_book(), graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.V().has("person", "description", Search.phrase("a cold", 2)).values("name") - #Should match 'George Bill Steve' since 'A cold dude' is at distance of 0 for 'a cold'. - #Should match 'Jill Alice' since 'Enjoys a very nice cold coca cola' is at distance of 2 for 'a cold'. - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 2) - self.assertIn('George Bill Steve', results_list) - self.assertIn('Jill Alice', results_list) - - traversal = g.V().has("person", "description", Search.phrase("a bald", 2)).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 0) - - -@requiredse -@GraphTestConfiguration.generate_tests(traversal=True) -class ImplicitSearchTest(AbstractSearchTest): - """ - This test class will execute all tests of the AbstractSearchTest using implicit execution - All traversals will be run directly using toList() - """ - def fetch_key_from_prop(self, property): - return property.key - - def execute_traversal(self, traversal, graphson=None): - return traversal.toList() - - -@requiredse -@GraphTestConfiguration.generate_tests(traversal=True) -class ExplicitSearchTest(AbstractSearchTest): - """ - This test class will execute all tests of the AbstractSearchTest using implicit execution - All traversals will be converted to byte code then they will be executed explicitly. 
- """ - - def execute_traversal(self, traversal, graphson): - ep = self.get_execution_profile(graphson, traversal=True) - ep = self.session.get_execution_profile(ep) - context = None - if graphson == GraphProtocol.GRAPHSON_3_0: - context = { - 'cluster': self.cluster, - 'graph_name': ep.graph_options.graph_name.decode('utf-8') if ep.graph_options.graph_name else None - } - query = DseGraph.query_from_traversal(traversal, graphson, context=context) - #Use an ep that is configured with the correct row factory, and bytecode-json language flat set - result_set = self.execute_graph(query, graphson, traversal=True) - return list(result_set) - - -@requiredse -class BaseCqlCollectionPredicatesTest(GraphUnitTestCase): - - def setUp(self): - super(BaseCqlCollectionPredicatesTest, self).setUp() - self.ep_graphson3 = DseGraph().create_execution_profile(self.graph_name, - graph_protocol=GraphProtocol.GRAPHSON_3_0) - self.cluster.add_execution_profile('traversal_graphson3', self.ep_graphson3) - - def fetch_traversal_source(self, graphson): - ep = self.get_execution_profile(graphson, traversal=True) - return DseGraph().traversal_source(self.session, self.graph_name, execution_profile=ep) - - def setup_vertex_label(self, graphson): - ep = self.get_execution_profile(graphson) - self.session.execute_graph(""" - schema.vertexLabel('cqlcollections').ifNotExists().partitionBy('name', Varchar) - .property('list', listOf(Text)) - .property('frozen_list', frozen(listOf(Text))) - .property('set', setOf(Text)) - .property('frozen_set', frozen(setOf(Text))) - .property('map_keys', mapOf(Int, Text)) - .property('map_values', mapOf(Int, Text)) - .property('map_entries', mapOf(Int, Text)) - .property('frozen_map', frozen(mapOf(Int, Text))) - .create() - """, execution_profile=ep) - - self.session.execute_graph(""" - schema.vertexLabel('cqlcollections').secondaryIndex('list').by('list').create(); - schema.vertexLabel('cqlcollections').secondaryIndex('frozen_list').by('frozen_list').indexFull().create(); - schema.vertexLabel('cqlcollections').secondaryIndex('set').by('set').create(); - schema.vertexLabel('cqlcollections').secondaryIndex('frozen_set').by('frozen_set').indexFull().create(); - schema.vertexLabel('cqlcollections').secondaryIndex('map_keys').by('map_keys').indexKeys().create(); - schema.vertexLabel('cqlcollections').secondaryIndex('map_values').by('map_values').indexValues().create(); - schema.vertexLabel('cqlcollections').secondaryIndex('map_entries').by('map_entries').indexEntries().create(); - schema.vertexLabel('cqlcollections').secondaryIndex('frozen_map').by('frozen_map').indexFull().create(); - """, execution_profile=ep) - - def _test_contains_list(self, schema, graphson): - """ - Test to validate that the cql predicate contains works with list - - @since TODO dse 6.8 - @jira_ticket PYTHON-1039 - @expected_result contains predicate work on a list - - @test_category dse graph - """ - self.setup_vertex_label(graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.addV("cqlcollections").property("name", "list1").property("list", ['item1', 'item2']) - self.execute_traversal(traversal, graphson) - traversal = g.addV("cqlcollections").property("name", "list2").property("list", ['item3', 'item4']) - self.execute_traversal(traversal, graphson) - traversal = g.V().has("cqlcollections", "list", CqlCollection.contains("item1")).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 1) - self.assertIn("list1", results_list) - - def 
_test_contains_set(self, schema, graphson): - """ - Test to validate that the cql predicate contains works with set - - @since TODO dse 6.8 - @jira_ticket PYTHON-1039 - @expected_result contains predicate work on a set - - @test_category dse graph - """ - self.setup_vertex_label(graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.addV("cqlcollections").property("name", "set1").property("set", {'item1', 'item2'}) - self.execute_traversal(traversal, graphson) - traversal = g.addV("cqlcollections").property("name", "set2").property("set", {'item3', 'item4'}) - self.execute_traversal(traversal, graphson) - traversal = g.V().has("cqlcollections", "set", CqlCollection.contains("item1")).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 1) - self.assertIn("set1", results_list) - - def _test_contains_key_map(self, schema, graphson): - """ - Test to validate that the cql predicate contains_key works with map - - @since TODO dse 6.8 - @jira_ticket PYTHON-1039 - @expected_result contains_key predicate work on a map - - @test_category dse graph - """ - self.setup_vertex_label(graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.addV("cqlcollections").property("name", "map1").property("map_keys", {0: 'item1', 1: 'item2'}) - self.execute_traversal(traversal, graphson) - traversal = g.addV("cqlcollections").property("name", "map2").property("map_keys", {2: 'item3', 3: 'item4'}) - self.execute_traversal(traversal, graphson) - traversal = g.V().has("cqlcollections", "map_keys", CqlCollection.contains_key(0)).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 1) - self.assertIn("map1", results_list) - - def _test_contains_value_map(self, schema, graphson): - """ - Test to validate that the cql predicate contains_value works with map - - @since TODO dse 6.8 - @jira_ticket PYTHON-1039 - @expected_result contains_value predicate work on a map - - @test_category dse graph - """ - self.setup_vertex_label(graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.addV("cqlcollections").property("name", "map1").property("map_values", {0: 'item1', 1: 'item2'}) - self.execute_traversal(traversal, graphson) - traversal = g.addV("cqlcollections").property("name", "map2").property("map_values", {2: 'item3', 3: 'item4'}) - self.execute_traversal(traversal, graphson) - traversal = g.V().has("cqlcollections", "map_values", CqlCollection.contains_value('item3')).values("name") - results_list = self.execute_traversal(traversal, graphson) - self.assertEqual(len(results_list), 1) - self.assertIn("map2", results_list) - - def _test_entry_eq_map(self, schema, graphson): - """ - Test to validate that the cql predicate entry_eq works with map - - @since TODO dse 6.8 - @jira_ticket PYTHON-1039 - @expected_result entry_eq predicate work on a map - - @test_category dse graph - """ - self.setup_vertex_label(graphson) - g = self.fetch_traversal_source(graphson) - traversal = g.addV("cqlcollections").property("name", "map1").property("map_entries", {0: 'item1', 1: 'item2'}) - self.execute_traversal(traversal, graphson) - traversal = g.addV("cqlcollections").property("name", "map2").property("map_entries", {2: 'item3', 3: 'item4'}) - self.execute_traversal(traversal, graphson) - traversal = g.V().has("cqlcollections", "map_entries", CqlCollection.entry_eq([2, 'item3'])).values("name") - results_list = self.execute_traversal(traversal, graphson) - 
self.assertEqual(len(results_list), 1) - self.assertIn("map2", results_list) - - -@requiredse -@GraphTestConfiguration.generate_tests(traversal=True, schema=CoreGraphSchema) -class ImplicitCqlCollectionPredicatesTest(BaseCqlCollectionPredicatesTest): - """ - This test class will execute all tests of the BaseCqlCollectionTest using implicit execution - All traversals will be run directly using toList() - """ - - def execute_traversal(self, traversal, graphson=None): - return traversal.toList() - - -@requiredse -@GraphTestConfiguration.generate_tests(traversal=True, schema=CoreGraphSchema) -class ExplicitCqlCollectionPredicatesTest(BaseCqlCollectionPredicatesTest): - """ - This test class will execute all tests of the AbstractSearchTest using implicit execution - All traversals will be converted to byte code then they will be executed explicitly. - """ - - def execute_traversal(self, traversal, graphson): - ep = self.get_execution_profile(graphson, traversal=True) - ep = self.session.get_execution_profile(ep) - context = None - if graphson == GraphProtocol.GRAPHSON_3_0: - context = { - 'cluster': self.cluster, - 'graph_name': ep.graph_options.graph_name.decode('utf-8') if ep.graph_options.graph_name else None - } - query = DseGraph.query_from_traversal(traversal, graphson, context=context) - result_set = self.execute_graph(query, graphson, traversal=True) - return list(result_set) diff --git a/tests/integration/advanced/graph/test_graph.py b/tests/integration/advanced/graph/test_graph.py deleted file mode 100644 index 7f55229911..0000000000 --- a/tests/integration/advanced/graph/test_graph.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
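The Explicit* test classes above all funnel through DseGraph.query_from_traversal(), which serializes a fluent traversal to a bytecode-JSON graph statement that session.execute_graph() can run. A condensed sketch of that flow under assumed placeholders ('my_graph', a profile name 'traversal_g3', and a freshly built cluster/session):

    from cassandra.cluster import Cluster
    from cassandra.datastax.graph.fluent import DseGraph
    from cassandra.graph import GraphProtocol

    cluster = Cluster()
    session = cluster.connect()

    # Build and register a GraphSON3 execution profile for the graph.
    ep = DseGraph.create_execution_profile('my_graph', graph_protocol=GraphProtocol.GRAPHSON_3_0)
    cluster.add_execution_profile('traversal_g3', ep)

    g = DseGraph().traversal_source(session, 'my_graph', execution_profile='traversal_g3')
    traversal = g.V().has('name', 'marko').values('name')

    # GraphSON3 serialization needs cluster context so complex types (UDTs, tuples) resolve.
    query = DseGraph.query_from_traversal(
        traversal, GraphProtocol.GRAPHSON_3_0,
        context={'cluster': cluster, 'graph_name': 'my_graph'})
    results = list(session.execute_graph(query, execution_profile='traversal_g3'))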
-
-import re
-
-from cassandra import OperationTimedOut, InvalidRequest
-from cassandra.protocol import SyntaxException
-from cassandra.policies import WhiteListRoundRobinPolicy
-from cassandra.cluster import NoHostAvailable
-from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, GraphExecutionProfile
-from cassandra.graph import single_object_row_factory, Vertex, graph_object_row_factory, \
-    graph_graphson2_row_factory, graph_graphson3_row_factory
-from cassandra.util import SortedSet
-
-from tests.integration import DSE_VERSION, greaterthanorequaldse51, greaterthanorequaldse68, \
-    requiredse, TestCluster
-from tests.integration.advanced.graph import BasicGraphUnitTestCase, GraphUnitTestCase, \
-    GraphProtocol, ClassicGraphSchema, CoreGraphSchema, use_single_node_with_graph
-
-
-def setup_module():
-    if DSE_VERSION:
-        dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}}
-        use_single_node_with_graph(dse_options=dse_options)
-
-
-@requiredse
-class GraphTimeoutTests(BasicGraphUnitTestCase):
-
-    def test_should_wait_indefinitely_by_default(self):
-        """
-        Tests that by default the client should wait indefinitely for server timeouts
-
-        @since 1.0.0
-        @jira_ticket PYTHON-589
-
-        @test_category dse graph
-        """
-        desired_timeout = 1000
-
-        graph_source = "test_timeout_1"
-        ep_name = graph_source
-        ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT)
-        ep.graph_options = ep.graph_options.copy()
-        ep.graph_options.graph_source = graph_source
-        self.cluster.add_execution_profile(ep_name, ep)
-
-        to_run = '''graph.schema().config().option("graph.traversal_sources.{0}.evaluation_timeout").set('{1} ms')'''.format(
-            graph_source, desired_timeout)
-        self.session.execute_graph(to_run, execution_profile=ep_name)
-        with self.assertRaises(InvalidRequest) as ir:
-            self.session.execute_graph("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1",
-                                       execution_profile=ep_name)
-        self.assertTrue("evaluation exceeded the configured threshold of 1000" in str(ir.exception) or
-                        "evaluation exceeded the configured threshold of evaluation_timeout at 1000" in str(
-                            ir.exception))
-
-    def test_request_timeout_less_then_server(self):
-        """
-        Tests that with explicit request_timeouts set, a server timeout is honored if it's triggered prior to the
-        client timeout
-
-        @since 1.0.0
-        @jira_ticket PYTHON-589
-
-        @test_category dse graph
-        """
-        desired_timeout = 1000
-        graph_source = "test_timeout_2"
-        ep_name = graph_source
-        ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=32)
-        ep.graph_options = ep.graph_options.copy()
-        ep.graph_options.graph_source = graph_source
-        self.cluster.add_execution_profile(ep_name, ep)
-
-        to_run = '''graph.schema().config().option("graph.traversal_sources.{0}.evaluation_timeout").set('{1} ms')'''.format(
-            graph_source, desired_timeout)
-        self.session.execute_graph(to_run, execution_profile=ep_name)
-        with self.assertRaises(InvalidRequest) as ir:
-            self.session.execute_graph("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1",
-                                       execution_profile=ep_name)
-        self.assertTrue("evaluation exceeded the configured threshold of 1000" in str(ir.exception) or
-                        "evaluation exceeded the configured threshold of evaluation_timeout at 1000" in str(
-                            ir.exception))
-
-    def test_server_timeout_less_then_request(self):
-        """
-        Tests that with explicit request_timeouts set, a client timeout is honored if it's triggered prior to the
-        server sending a timeout.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-589
-
-        @test_category dse graph
-        """
-        graph_source = "test_timeout_3"
-        ep_name = graph_source
-        ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, request_timeout=1)
-        ep.graph_options = ep.graph_options.copy()
-        ep.graph_options.graph_source = graph_source
-        self.cluster.add_execution_profile(ep_name, ep)
-        server_timeout = 10000
-        to_run = '''graph.schema().config().option("graph.traversal_sources.{0}.evaluation_timeout").set('{1} ms')'''.format(
-            graph_source, server_timeout)
-        self.session.execute_graph(to_run, execution_profile=ep_name)
-
-        with self.assertRaises(Exception) as e:
-            self.session.execute_graph("java.util.concurrent.TimeUnit.MILLISECONDS.sleep(35000L);1+1",
-                                       execution_profile=ep_name)
-        self.assertTrue(isinstance(e.exception, (InvalidRequest, OperationTimedOut)))
-
-
-@requiredse
-class GraphProfileTests(BasicGraphUnitTestCase):
-    def test_graph_profile(self):
-        """
-        Test verifying various aspects of graph config properties.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-570
-
-        @test_category dse graph
-        """
-        hosts = self.cluster.metadata.all_hosts()
-        first_host = hosts[0].address
-        second_hosts = "1.2.3.4"
-
-        self._execute(ClassicGraphSchema.fixtures.classic(), graphson=GraphProtocol.GRAPHSON_1_0)
-        # Create various execution policies
-        exec_dif_factory = GraphExecutionProfile(row_factory=single_object_row_factory)
-        exec_dif_factory.graph_options.graph_name = self.graph_name
-        exec_dif_lbp = GraphExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([first_host]))
-        exec_dif_lbp.graph_options.graph_name = self.graph_name
-        exec_bad_lbp = GraphExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([second_hosts]))
-        exec_bad_lbp.graph_options.graph_name = self.graph_name
-        exec_short_timeout = GraphExecutionProfile(request_timeout=1,
-                                                   load_balancing_policy=WhiteListRoundRobinPolicy([first_host]))
-        exec_short_timeout.graph_options.graph_name = self.graph_name
-
-        # Add a single execution policy on cluster creation
-        local_cluster = TestCluster(execution_profiles={"exec_dif_factory": exec_dif_factory})
-        local_session = local_cluster.connect()
-        self.addCleanup(local_cluster.shutdown)
-
-        rs1 = self.session.execute_graph('g.V()')
-        rs2 = local_session.execute_graph('g.V()', execution_profile='exec_dif_factory')
-
-        # Verify that the default and the non-default policies work
-        self.assertFalse(isinstance(rs2[0], Vertex))
-        self.assertTrue(isinstance(rs1[0], Vertex))
-        # Add other policies and validate that the LBPs are honored
-        local_cluster.add_execution_profile("exec_dif_ldp", exec_dif_lbp)
-        local_session.execute_graph('g.V()', execution_profile="exec_dif_ldp")
-        local_cluster.add_execution_profile("exec_bad_lbp", exec_bad_lbp)
-        with self.assertRaises(NoHostAvailable):
-            local_session.execute_graph('g.V()', execution_profile="exec_bad_lbp")
-
-        # Try with missing EP
-        with self.assertRaises(ValueError):
-            local_session.execute_graph('g.V()', execution_profile='bad_exec_profile')
-
-        # Validate that the timeout is honored
-        local_cluster.add_execution_profile("exec_short_timeout", exec_short_timeout)
-        with self.assertRaises(Exception) as e:
-            local_session.execute_graph('java.util.concurrent.TimeUnit.MILLISECONDS.sleep(2000L);',
-                                        execution_profile='exec_short_timeout')
-        self.assertTrue(isinstance(e.exception, (InvalidRequest, OperationTimedOut)))
-
-
-@requiredse
-class GraphMetadataTest(BasicGraphUnitTestCase):
-
-    @greaterthanorequaldse51
-    def test_dse_workloads(self):
-        """
-        Test to
ensure dse_workloads is populated appropriately. - Field added in DSE 5.1 - - @since DSE 2.0 - @jira_ticket PYTHON-667 - @expected_result dse_workloads set is set on host model - - @test_category metadata - """ - for host in self.cluster.metadata.all_hosts(): - self.assertIsInstance(host.dse_workloads, SortedSet) - self.assertIn("Cassandra", host.dse_workloads) - self.assertIn("Graph", host.dse_workloads) - - -@requiredse -class GraphExecutionProfileOptionsResolveTest(GraphUnitTestCase): - """ - Test that the execution profile options are properly resolved for graph queries. - - @since DSE 6.8 - @jira_ticket PYTHON-1004 PYTHON-1056 - @expected_result execution profile options are properly determined following the rules. - """ - - def test_default_options(self): - ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) - self.assertEqual(ep.graph_options.graph_protocol, None) - self.assertEqual(ep.row_factory, None) - self.session._resolve_execution_profile_options(ep) - self.assertEqual(ep.graph_options.graph_protocol, GraphProtocol.GRAPHSON_1_0) - self.assertEqual(ep.row_factory, graph_object_row_factory) - - def test_default_options_when_not_groovy(self): - ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) - self.assertEqual(ep.graph_options.graph_protocol, None) - self.assertEqual(ep.row_factory, None) - ep.graph_options.graph_language = 'whatever' - self.session._resolve_execution_profile_options(ep) - self.assertEqual(ep.graph_options.graph_protocol, GraphProtocol.GRAPHSON_2_0) - self.assertEqual(ep.row_factory, graph_graphson2_row_factory) - - def test_default_options_when_explicitly_specified(self): - ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) - self.assertEqual(ep.graph_options.graph_protocol, None) - self.assertEqual(ep.row_factory, None) - obj = object() - ep.graph_options.graph_protocol = obj - ep.row_factory = obj - self.session._resolve_execution_profile_options(ep) - self.assertEqual(ep.graph_options.graph_protocol, obj) - self.assertEqual(ep.row_factory, obj) - - @greaterthanorequaldse68 - def test_graph_protocol_default_for_core_is_graphson3(self): - """Test that graphson3 is automatically resolved for a core graph query""" - self.setup_graph(CoreGraphSchema) - ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) - self.assertEqual(ep.graph_options.graph_protocol, None) - self.assertEqual(ep.row_factory, None) - # Ensure we have the graph metadata - self.session.cluster.refresh_schema_metadata() - self.session._resolve_execution_profile_options(ep) - self.assertEqual(ep.graph_options.graph_protocol, GraphProtocol.GRAPHSON_3_0) - self.assertEqual(ep.row_factory, graph_graphson3_row_factory) - - self.execute_graph_queries(CoreGraphSchema.fixtures.classic(), verify_graphson=GraphProtocol.GRAPHSON_3_0) - - @greaterthanorequaldse68 - def test_graph_protocol_default_for_core_fallback_to_graphson1_if_no_graph_name(self): - """Test that graphson1 is set when we cannot detect if it's a core graph""" - self.setup_graph(CoreGraphSchema) - default_ep = self.session.get_execution_profile(EXEC_PROFILE_GRAPH_DEFAULT) - graph_options = default_ep.graph_options.copy() - graph_options.graph_name = None - ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, graph_options=graph_options) - self.session._resolve_execution_profile_options(ep) - self.assertEqual(ep.graph_options.graph_protocol, GraphProtocol.GRAPHSON_1_0) - self.assertEqual(ep.row_factory, graph_object_row_factory) - - regex = 
re.compile(".*Variable.*is unknown.*", re.S) - with self.assertRaisesRegex(SyntaxException, regex): - self.execute_graph_queries(CoreGraphSchema.fixtures.classic(), - execution_profile=ep, verify_graphson=GraphProtocol.GRAPHSON_1_0) diff --git a/tests/integration/advanced/graph/test_graph_cont_paging.py b/tests/integration/advanced/graph/test_graph_cont_paging.py deleted file mode 100644 index 065d01d939..0000000000 --- a/tests/integration/advanced/graph/test_graph_cont_paging.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from cassandra.cluster import ContinuousPagingOptions - -from tests.integration import greaterthanorequaldse68 -from tests.integration.advanced.graph import GraphUnitTestCase, CoreGraphSchema, GraphTestConfiguration - - -@greaterthanorequaldse68 -@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema) -class GraphPagingTest(GraphUnitTestCase): - - def _setup_data(self, schema, graphson): - self.execute_graph("schema.vertexLabel('person').ifNotExists().partitionBy('name', Text).property('age', Int).create();", graphson) - for i in range(100): - self.execute_graph("g.addV('person').property('name', 'batman-{}')".format(i), graphson) - - def _test_cont_paging_is_enabled_by_default(self, schema, graphson): - """ - Test that graph paging is automatically enabled with a >=6.8 cluster. - - @jira_ticket PYTHON-1045 - @expected_result the response future has a continuous_paging_session since graph paging is enabled - - @test_category dse graph - """ - ep = self.get_execution_profile(graphson) - self._setup_data(schema, graphson) - rf = self.session.execute_graph_async("g.V()", execution_profile=ep) - results = list(rf.result()) - self.assertIsNotNone(rf._continuous_paging_session) - self.assertEqual(len(results), 100) - - def _test_cont_paging_can_be_disabled(self, schema, graphson): - """ - Test that graph paging can be disabled. - - @jira_ticket PYTHON-1045 - @expected_result the response future doesn't have a continuous_paging_session since graph paging is disabled - - @test_category dse graph - """ - ep = self.get_execution_profile(graphson) - new_ep = self.session.execution_profile_clone_update(ep, continuous_paging_options=None) - self._setup_data(schema, graphson) - rf = self.session.execute_graph_async("g.V()", execution_profile=new_ep) - results = list(rf.result()) - self.assertIsNone(rf._continuous_paging_session) - self.assertEqual(len(results), 100) - - def _test_cont_paging_with_custom_options(self, schema, graphson): - """ - Test that we can specify custom paging options. 
- - @jira_ticket PYTHON-1045 - @expected_result we get only the desired number of results - - @test_category dse graph - """ - ep = self.get_execution_profile(graphson) - new_ep = self.session.execution_profile_clone_update( - ep, continuous_paging_options=ContinuousPagingOptions(max_pages=1)) - self._setup_data(schema, graphson) - self.session.default_fetch_size = 10 - results = list(self.session.execute_graph("g.V()", execution_profile=new_ep)) - self.assertEqual(len(results), 10) diff --git a/tests/integration/advanced/graph/test_graph_datatype.py b/tests/integration/advanced/graph/test_graph_datatype.py deleted file mode 100644 index 8a261c94d9..0000000000 --- a/tests/integration/advanced/graph/test_graph_datatype.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import time -import logging -from packaging.version import Version -from collections import namedtuple - -from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT -from cassandra.graph import graph_result_row_factory -from cassandra.graph.query import GraphProtocol -from cassandra.graph.types import VertexProperty - -from tests.util import wait_until -from tests.integration.advanced.graph import BasicGraphUnitTestCase, ClassicGraphFixtures, \ - ClassicGraphSchema, CoreGraphSchema -from tests.integration.advanced.graph import VertexLabel, GraphTestConfiguration, GraphUnitTestCase -from tests.integration import DSE_VERSION, requiredse - -log = logging.getLogger(__name__) - - -@requiredse -class GraphBasicDataTypesTests(BasicGraphUnitTestCase): - - def test_result_types(self): - """ - Test to validate that the edge and vertex version of results are constructed correctly. - - @since 1.0.0 - @jira_ticket PYTHON-479 - @expected_result edge/vertex result types should be unpacked correctly. 
- @test_category dse graph - """ - queries, params = ClassicGraphFixtures.multiple_fields() - for query in queries: - self.session.execute_graph(query, params) - - prof = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, row_factory=graph_result_row_factory) # requires simplified row factory to avoid shedding id/~type information used for validation below - rs = self.session.execute_graph("g.V()", execution_profile=prof) - - for result in rs: - self._validate_type(result) - - def _validate_type(self, vertex): - for properties in vertex.properties.values(): - prop = properties[0] - - if DSE_VERSION >= Version("5.1"): - type_indicator = prop['id']['~label'] - else: - type_indicator = prop['id']['~type'] - - if any(type_indicator.startswith(t) for t in - ('int', 'short', 'long', 'bigint', 'decimal', 'smallint', 'varint')): - typ = int - elif any(type_indicator.startswith(t) for t in ('float', 'double')): - typ = float - elif any(type_indicator.startswith(t) for t in ('duration', 'date', 'negdate', 'time', - 'blob', 'timestamp', 'point', 'linestring', 'polygon', - 'inet', 'uuid')): - typ = str - else: - pass - self.fail("Received unexpected type: %s" % type_indicator) - self.assertIsInstance(prop['value'], typ) - - -class GenericGraphDataTypeTest(GraphUnitTestCase): - - def _test_all_datatypes(self, schema, graphson): - ep = self.get_execution_profile(graphson) - - for data in schema.fixtures.datatypes().values(): - typ, value, deserializer = data - vertex_label = VertexLabel([typ]) - property_name = next(iter(vertex_label.non_pk_properties.keys())) - schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) - vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] - - def get_vertex_properties(): - return list(schema.get_vertex_properties( - self.session, vertex, execution_profile=ep)) - - prop_returned = 1 if DSE_VERSION < Version('5.1') else 2 # include pkid >=5.1 - wait_until( - lambda: len(get_vertex_properties()) == prop_returned, 0.2, 15) - - vertex_properties = get_vertex_properties() - if graphson == GraphProtocol.GRAPHSON_1_0: - vertex_properties = [vp.as_vertex_property() for vp in vertex_properties] - - for vp in vertex_properties: - if vp.label == 'pkid': - continue - - self.assertIsInstance(vp, VertexProperty) - self.assertEqual(vp.label, property_name) - if graphson == GraphProtocol.GRAPHSON_1_0: - deserialized_value = deserializer(vp.value) if deserializer else vp.value - self.assertEqual(deserialized_value, value) - else: - self.assertEqual(vp.value, value) - - def __test_udt(self, schema, graphson, address_class, address_with_tags_class, - complex_address_class, complex_address_with_owners_class): - if schema is not CoreGraphSchema or DSE_VERSION < Version('6.8'): - raise unittest.SkipTest("Graph UDT is only supported with DSE 6.8+ and Core graphs.") - - ep = self.get_execution_profile(graphson) - - Address = address_class - AddressWithTags = address_with_tags_class - ComplexAddress = complex_address_class - ComplexAddressWithOwners = complex_address_with_owners_class - - # setup udt - self.session.execute_graph(""" - schema.type('address').property('address', Text).property('city', Text).property('state', Text).create(); - schema.type('addressTags').property('address', Text).property('city', Text).property('state', Text). - property('tags', setOf(Text)).create(); - schema.type('complexAddress').property('address', Text).property('address_tags', frozen(typeOf('addressTags'))). 
- property('city', Text).property('state', Text).property('props', mapOf(Text, Int)).create(); - schema.type('complexAddressWithOwners').property('address', Text). - property('address_tags', frozen(typeOf('addressTags'))). - property('city', Text).property('state', Text).property('props', mapOf(Text, Int)). - property('owners', frozen(listOf(tupleOf(Text, Int)))).create(); - """, execution_profile=ep) - - time.sleep(2) # wait the UDT to be discovered - self.session.cluster.register_user_type(self.graph_name, 'address', Address) - self.session.cluster.register_user_type(self.graph_name, 'addressTags', AddressWithTags) - self.session.cluster.register_user_type(self.graph_name, 'complexAddress', ComplexAddress) - self.session.cluster.register_user_type(self.graph_name, 'complexAddressWithOwners', ComplexAddressWithOwners) - - data = { - "udt1": ["typeOf('address')", Address('1440 Rd Smith', 'Quebec', 'QC')], - "udt2": ["tupleOf(typeOf('address'), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], - "udt3": ["tupleOf(frozen(typeOf('address')), Text)", (Address('1440 Rd Smith', 'Quebec', 'QC'), 'hello')], - "udt4": ["tupleOf(tupleOf(Int, typeOf('address')), Text)", - ((42, Address('1440 Rd Smith', 'Quebec', 'QC')), 'hello')], - "udt5": ["tupleOf(tupleOf(Int, typeOf('addressTags')), Text)", - ((42, AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'})), 'hello')], - "udt6": ["tupleOf(tupleOf(Int, typeOf('complexAddress')), Text)", - ((42, ComplexAddress('1440 Rd Smith', - AddressWithTags('1440 Rd Smith', 'Quebec', 'QC', {'t1', 't2'}), - 'Quebec', 'QC', {'p1': 42, 'p2': 33})), 'hello')], - "udt7": ["tupleOf(tupleOf(Int, frozen(typeOf('complexAddressWithOwners'))), Text)", - ((42, ComplexAddressWithOwners( - '1440 Rd Smith', - AddressWithTags('1440 CRd Smith', 'Quebec', 'QC', {'t1', 't2'}), - 'Quebec', 'QC', {'p1': 42, 'p2': 33}, [('Mike', 43), ('Gina', 39)]) - ), 'hello')] - } - - for typ, value in data.values(): - vertex_label = VertexLabel([typ]) - property_name = next(iter(vertex_label.non_pk_properties.keys())) - schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) - - vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] - - def get_vertex_properties(): - return list(schema.get_vertex_properties( - self.session, vertex, execution_profile=ep)) - - wait_until( - lambda: len(get_vertex_properties()) == 2, 0.2, 15) - - vertex_properties = get_vertex_properties() - for vp in vertex_properties: - if vp.label == 'pkid': - continue - - self.assertIsInstance(vp, VertexProperty) - self.assertEqual(vp.label, property_name) - self.assertEqual(vp.value, value) - - def _test_udt_with_classes(self, schema, graphson): - class Address(object): - - def __init__(self, address, city, state): - self.address = address - self.city = city - self.state = state - - def __eq__(self, other): - return self.address == other.address and self.city == other.city and self.state == other.state - - class AddressWithTags(object): - - def __init__(self, address, city, state, tags): - self.address = address - self.city = city - self.state = state - self.tags = tags - - def __eq__(self, other): - return (self.address == other.address and self.city == other.city - and self.state == other.state and self.tags == other.tags) - - class ComplexAddress(object): - - def __init__(self, address, address_tags, city, state, props): - self.address = address - self.address_tags = address_tags - self.city = city - self.state = state - self.props = 
props - - def __eq__(self, other): - return (self.address == other.address and self.address_tags == other.address_tags - and self.city == other.city and self.state == other.state - and self.props == other.props) - - class ComplexAddressWithOwners(object): - - def __init__(self, address, address_tags, city, state, props, owners): - self.address = address - self.address_tags = address_tags - self.city = city - self.state = state - self.props = props - self.owners = owners - - def __eq__(self, other): - return (self.address == other.address and self.address_tags == other.address_tags - and self.city == other.city and self.state == other.state - and self.props == other.props and self.owners == other.owners) - - self.__test_udt(schema, graphson, Address, AddressWithTags, ComplexAddress, ComplexAddressWithOwners) - - def _test_udt_with_namedtuples(self, schema, graphson): - AddressTuple = namedtuple('Address', ('address', 'city', 'state')) - AddressWithTagsTuple = namedtuple('AddressWithTags', ('address', 'city', 'state', 'tags')) - ComplexAddressTuple = namedtuple('ComplexAddress', ('address', 'address_tags', 'city', 'state', 'props')) - ComplexAddressWithOwnersTuple = namedtuple('ComplexAddressWithOwners', ('address', 'address_tags', 'city', - 'state', 'props', 'owners')) - - self.__test_udt(schema, graphson, AddressTuple, AddressWithTagsTuple, - ComplexAddressTuple, ComplexAddressWithOwnersTuple) - - -@requiredse -@GraphTestConfiguration.generate_tests(schema=ClassicGraphSchema) -class ClassicGraphDataTypeTest(GenericGraphDataTypeTest): - pass - - -@requiredse -@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema) -class CoreGraphDataTypeTest(GenericGraphDataTypeTest): - pass diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py deleted file mode 100644 index 0c889938d8..0000000000 --- a/tests/integration/advanced/graph/test_graph_query.py +++ /dev/null @@ -1,594 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
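The UDT round-trips above rely on register_user_type() mapping a server-side graph type onto a Python class so the driver can serialize parameters and deserialize results transparently. A minimal sketch, assuming an existing session, a core graph named 'my_graph', a graph execution profile ep, and a vertex label with a 'home' UDT property (all placeholders):

    from collections import namedtuple

    # Any class whose constructor takes the UDT fields works; namedtuple is the lightest.
    Address = namedtuple('Address', ('address', 'city', 'state'))

    # Create the type server-side with the core graph schema API.
    session.execute_graph(
        "schema.type('address')"
        ".property('address', Text).property('city', Text).property('state', Text)"
        ".create();",
        execution_profile=ep)

    # Map the graph UDT to the Python class.
    session.cluster.register_user_type('my_graph', 'address', Address)

    # Instances can now be passed as graph parameters and come back as Address objects.
    session.execute_graph(
        "g.addV('person').property('name', n).property('home', a)",
        {'n': 'marko', 'a': Address('1440 Rd Smith', 'Quebec', 'QC')},
        execution_profile=ep)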
-
-
-import sys
-from packaging.version import Version
-
-from copy import copy
-from itertools import chain
-import json
-import time
-
-import unittest
-
-from cassandra import OperationTimedOut, ConsistencyLevel, InvalidRequest
-from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, NoHostAvailable
-from cassandra.protocol import ServerError, SyntaxException
-from cassandra.query import QueryTrace
-from cassandra.util import Point
-from cassandra.graph import (SimpleGraphStatement, single_object_row_factory,
-                             Result, GraphOptions, GraphProtocol, to_bigint)
-from cassandra.datastax.graph.query import _graph_options
-from cassandra.datastax.graph.types import T
-
-from tests.integration import DSE_VERSION, requiredse, greaterthanorequaldse68
-from tests.integration.advanced.graph import BasicGraphUnitTestCase, GraphTestConfiguration, \
-    validate_classic_vertex, GraphUnitTestCase, validate_classic_edge, validate_path_result_type, \
-    validate_line_edge, validate_generic_vertex_result_type, \
-    ClassicGraphSchema, CoreGraphSchema, VertexLabel
-
-
-@requiredse
-class BasicGraphQueryTest(BasicGraphUnitTestCase):
-
-    def test_consistency_passing(self):
-        """
-        Test to validate that graph consistency levels are properly surfaced to the base driver
-
-        @since 1.0.0
-        @jira_ticket PYTHON-509
-        @expected_result graph consistency levels are surfaced correctly
-        @test_category dse graph
-        """
-        cl_attrs = ('graph_read_consistency_level', 'graph_write_consistency_level')
-
-        # Iterates over the graph options and constructs an array containing
-        # the graph_options that correlate to graph read and write consistency levels
-        graph_params = [a[2] for a in _graph_options if a[0] in cl_attrs]
-
-        s = self.session
-        default_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT]
-        default_graph_opts = default_profile.graph_options
-        try:
-            # Checks the default graph attributes and ensures that both graph_read_consistency_level
-            # and graph_write_consistency_level are None by default
-            for attr in cl_attrs:
-                self.assertIsNone(getattr(default_graph_opts, attr))
-
-            res = s.execute_graph("null")
-            for param in graph_params:
-                self.assertNotIn(param, res.response_future.message.custom_payload)
-
-            # session defaults are passed
-            opts = GraphOptions()
-            opts.update(default_graph_opts)
-            cl = {0: ConsistencyLevel.ONE, 1: ConsistencyLevel.LOCAL_QUORUM}
-            for k, v in cl.items():
-                setattr(opts, cl_attrs[k], v)
-            default_profile.graph_options = opts
-
-            res = s.execute_graph("null")
-
-            for k, v in cl.items():
-                self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], ConsistencyLevel.value_to_name[v].encode())
-
-            # passed profile values override session defaults
-            cl = {0: ConsistencyLevel.ALL, 1: ConsistencyLevel.QUORUM}
-            opts = GraphOptions()
-            opts.update(default_graph_opts)
-            for k, v in cl.items():
-                attr_name = cl_attrs[k]
-                setattr(opts, attr_name, v)
-                self.assertNotEqual(getattr(default_profile.graph_options, attr_name), getattr(opts, attr_name))
-            tmp_profile = s.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, graph_options=opts)
-            res = s.execute_graph("null", execution_profile=tmp_profile)
-
-            for k, v in cl.items():
-                self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], ConsistencyLevel.value_to_name[v].encode())
-        finally:
-            default_profile.graph_options = default_graph_opts
-
-    def test_execute_graph_row_factory(self):
-        s = self.session
-
-        # default Results
-        default_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT]
-        self.assertEqual(default_profile.row_factory, None)  # will be resolved to graph_object_row_factory
-        result = s.execute_graph("123")[0]
-        self.assertIsInstance(result, Result)
-        self.assertEqual(result.value, 123)
-
-        # other via parameter
-        prof = s.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT, row_factory=single_object_row_factory)
-        rs = s.execute_graph("123", execution_profile=prof)
-        self.assertEqual(rs.response_future.row_factory, single_object_row_factory)
-        self.assertEqual(json.loads(rs[0]), {'result': 123})
-
-    def test_execute_graph_timeout(self):
-        s = self.session
-
-        value = [1, 2, 3]
-        query = "[%r]" % (value,)
-
-        # default is passed down
-        default_graph_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT]
-        rs = self.session.execute_graph(query)
-        self.assertEqual(rs[0].value, value)
-        self.assertEqual(rs.response_future.timeout, default_graph_profile.request_timeout)
-
-        # tiny timeout times out as expected
-        tmp_profile = copy(default_graph_profile)
-        tmp_profile.request_timeout = sys.float_info.min
-
-        max_retry_count = 10
-        for _ in range(max_retry_count):
-            start = time.time()
-            try:
-                with self.assertRaises(OperationTimedOut):
-                    s.execute_graph(query, execution_profile=tmp_profile)
-                break
-            except:
-                end = time.time()
-                self.assertAlmostEqual(start, end, 1)
-        else:
-            raise Exception("session.execute_graph didn't time out in {0} tries".format(max_retry_count))
-
-    def test_profile_graph_options(self):
-        s = self.session
-        statement = SimpleGraphStatement("true")
-        ep = self.session.execution_profile_clone_update(EXEC_PROFILE_GRAPH_DEFAULT)
-        self.assertTrue(s.execute_graph(statement, execution_profile=ep)[0].value)
-
-        # bad graph name to verify it's passed
-        ep.graph_options = ep.graph_options.copy()
-        ep.graph_options.graph_name = "definitely_not_correct"
-        try:
-            s.execute_graph(statement, execution_profile=ep)
-        except NoHostAvailable:
-            self.assertTrue(DSE_VERSION >= Version("6.0"))
-        except InvalidRequest:
-            self.assertTrue(DSE_VERSION >= Version("5.0"))
-        else:
-            if DSE_VERSION < Version("6.8"):  # >=6.8 returns true
-                self.fail("Should have raised ServerError or InvalidRequest")
-
-    def test_additional_custom_payload(self):
-        s = self.session
-        custom_payload = {'some': 'example'.encode('utf-8'), 'items': 'here'.encode('utf-8')}
-        sgs = SimpleGraphStatement("null", custom_payload=custom_payload)
-        future = s.execute_graph_async(sgs)
-
-        default_profile = s.cluster.profile_manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT]
-        default_graph_opts = default_profile.graph_options
-        for k, v in chain(custom_payload.items(), default_graph_opts.get_options_map().items()):
-            self.assertEqual(future.message.custom_payload[k], v)
-
-
-class GenericGraphQueryTest(GraphUnitTestCase):
-
-    def _test_basic_query(self, schema, graphson):
-        """
-        Test to validate that basic graph query results can be executed with a sane result set.
-
-        Creates a simple classic tinkerpop graph, and attempts to find all vertices
-        related to the vertex marko that have a label of knows.
-        See reference graph here
-        http://www.tinkerpop.com/docs/3.0.0.M1/
-
-        @since 1.0.0
-        @jira_ticket PYTHON-457
-        @expected_result graph should find two vertices related to marko via 'knows' edges.
-
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        rs = self.execute_graph('''g.V().has('name','marko').out('knows').values('name')''', graphson)
-        self.assertFalse(rs.has_more_pages)
-        results_list = self.resultset_to_list(rs)
-        self.assertEqual(len(results_list), 2)
-        self.assertIn('vadas', results_list)
-        self.assertIn('josh', results_list)
-
-    def _test_geometric_graph_types(self, schema, graphson):
-        """
-        Test to validate that geometric types function correctly
-
-        Creates a very simple graph, and tries to insert a simple point type
-
-        @since 1.0.0
-        @jira_ticket DSP-8087
-        @expected_result json types associated with the insert are parsed correctly
-
-        @test_category dse graph
-        """
-        vertex_label = VertexLabel([('pointP', "Point()")])
-        ep = self.get_execution_profile(graphson)
-        schema.create_vertex_label(self.session, vertex_label, ep)
-        # import org.apache.cassandra.db.marshal.geometry.Point;
-        rs = schema.add_vertex(self.session, vertex_label, 'pointP', Point(0, 1), ep)
-
-        # if the result set is not parsed correctly this will throw an exception
-        self.assertIsNotNone(rs)
-
-    def _test_execute_graph_trace(self, schema, graphson):
-        value = [1, 2, 3]
-        query = "[%r]" % (value,)
-
-        # default is no trace
-        rs = self.execute_graph(query, graphson)
-        results = self.resultset_to_list(rs)
-        self.assertEqual(results[0], value)
-        self.assertIsNone(rs.get_query_trace())
-
-        # request trace
-        rs = self.execute_graph(query, graphson, trace=True)
-        results = self.resultset_to_list(rs)
-        self.assertEqual(results[0], value)
-        qt = rs.get_query_trace(max_wait_sec=10)
-        self.assertIsInstance(qt, QueryTrace)
-        self.assertIsNotNone(qt.duration)
-
-    def _test_range_query(self, schema, graphson):
-        """
-        Test to validate that range queries are handled correctly.
-
-        Creates a very large line graph script and executes it. Then proceeds to do a range-limited
-        query against it, and ensures that the results are formatted correctly and that
-        the result set is properly sized.
-
-        @since 1.0.0
-        @jira_ticket PYTHON-457
-        @expected_result result set should be properly formatted and properly sized
-
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.line(150), graphson)
-        rs = self.execute_graph("g.E().range(0,10)", graphson)
-        self.assertFalse(rs.has_more_pages)
-        results = self.resultset_to_list(rs)
-        self.assertEqual(len(results), 10)
-        ep = self.get_execution_profile(graphson)
-        for result in results:
-            schema.ensure_properties(self.session, result, execution_profile=ep)
-            validate_line_edge(self, result)
-
-    def _test_classic_graph(self, schema, graphson):
-        """
-        Test to validate that basic graph generation, vertices and edges are surfaced correctly
-
-        Creates a simple classic tinkerpop graph, and iterates over the vertices and edges,
-        ensuring that each one is correct.
-        See reference graph here
-        http://www.tinkerpop.com/docs/3.0.0.M1/
-
-        @since 1.0.0
-        @jira_ticket PYTHON-457
-        @expected_result the graph should be generated and all vertex and edge results should be valid
-
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        rs = self.execute_graph('g.V()', graphson)
-        ep = self.get_execution_profile(graphson)
-        for vertex in rs:
-            schema.ensure_properties(self.session, vertex, execution_profile=ep)
-            validate_classic_vertex(self, vertex)
-        rs = self.execute_graph('g.E()', graphson)
-        for edge in rs:
-            schema.ensure_properties(self.session, edge, execution_profile=ep)
-            validate_classic_edge(self, edge)
-
-    def _test_graph_classic_path(self, schema, graphson):
-        """
-        Test to validate that the path version of the result type is generated correctly. It also
-        tests basic path results as that is not covered elsewhere
-
-        @since 1.0.0
-        @jira_ticket PYTHON-479
-        @expected_result path object should be unpacked correctly including all nested edges and vertices
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.classic(), graphson)
-        rs = self.execute_graph("g.V().hasLabel('person').has('name', 'marko').as('a').outE('knows').inV().as('c', 'd')."
-                                " outE('created').as('e', 'f', 'g').inV().path()",
                                graphson)
-        rs_list = list(rs)
-        self.assertEqual(len(rs_list), 2)
-        for result in rs_list:
-            try:
-                path = result.as_path()
-            except:
-                path = result
-
-            ep = self.get_execution_profile(graphson)
-            for obj in path.objects:
-                schema.ensure_properties(self.session, obj, ep)
-
-            validate_path_result_type(self, path)
-
-    def _test_large_create_script(self, schema, graphson):
-        """
-        Test to validate that server errors due to large groovy scripts are properly surfaced
-
-        Creates a very large line graph script and executes it. Then proceeds to create a line graph script
-        that is too large for the server to handle, and expects a server error to be returned
-
-        @since 1.0.0
-        @jira_ticket PYTHON-457
-        @expected_result a server error should be surfaced for the oversized script
-
-        @test_category dse graph
-        """
-        self.execute_graph(schema.fixtures.line(150), graphson)
-        self.execute_graph(schema.fixtures.line(300), graphson)  # This should pass since the queries are split
-        self.assertRaises(SyntaxException, self.execute_graph, schema.fixtures.line(300, single_script=True), graphson)  # this one is not split and is too big
-
-    def _test_large_result_set(self, schema, graphson):
-        """
-        Test to validate that large result sets return correctly.
-
-        Creates a very large graph. Ensures that large result sets are handled appropriately.
- - @since 1.0.0 - @jira_ticket PYTHON-457 - @expected_result when limits of result sets are hit errors should be surfaced appropriately - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.large(), graphson, execution_profile_options={'request_timeout': 32}) - rs = self.execute_graph("g.V()", graphson) - for result in rs: - validate_generic_vertex_result_type(self, result) - - def _test_param_passing(self, schema, graphson): - """ - Test to validate that parameter passing works as expected - - @since 1.0.0 - @jira_ticket PYTHON-457 - @expected_result parameters work as expected - - @test_category dse graph - """ - - # unused parameters are passed, but ignored - self.execute_graph("null", graphson, params={"doesn't": "matter", "what's": "passed"}) - - # multiple params - rs = self.execute_graph("[a, b]", graphson, params={'a': 0, 'b': 1}) - results = self.resultset_to_list(rs) - self.assertEqual(results[0], 0) - self.assertEqual(results[1], 1) - - if graphson == GraphProtocol.GRAPHSON_1_0: - # different value types - for param in (None, "string", 1234, 5.678, True, False): - result = self.resultset_to_list(self.execute_graph('x', graphson, params={'x': param}))[0] - self.assertEqual(result, param) - - def _test_vertex_property_properties(self, schema, graphson): - """ - Test verifying vertex property properties - - @since 1.0.0 - @jira_ticket PYTHON-487 - - @test_category dse graph - """ - if schema is not ClassicGraphSchema: - raise unittest.SkipTest('skipped because rich properties are only supported with classic graphs') - - self.execute_graph("schema.propertyKey('k0').Text().ifNotExists().create();", graphson) - self.execute_graph("schema.propertyKey('k1').Text().ifNotExists().create();", graphson) - self.execute_graph("schema.propertyKey('key').Text().properties('k0', 'k1').ifNotExists().create();", graphson) - self.execute_graph("schema.vertexLabel('MLP').properties('key').ifNotExists().create();", graphson) - v = self.execute_graph('''v = graph.addVertex('MLP') - v.property('key', 'value', 'k0', 'v0', 'k1', 'v1') - v''', graphson)[0] - self.assertEqual(len(v.properties), 1) - self.assertEqual(len(v.properties['key']), 1) - p = v.properties['key'][0] - self.assertEqual(p.label, 'key') - self.assertEqual(p.value, 'value') - self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'}) - - def _test_vertex_multiple_properties(self, schema, graphson): - """ - Test verifying vertex property form for various Cardinality - - All key types are encoded as a list, regardless of cardinality - - Single cardinality properties have only one value -- the last one added - - Default is single (this is config dependent) - - @since 1.0.0 - @jira_ticket PYTHON-487 - - @test_category dse graph - """ - if schema is not ClassicGraphSchema: - raise unittest.SkipTest('skipped because multiple properties are only supported with classic graphs') - - self.execute_graph('''Schema schema = graph.schema(); - schema.propertyKey('mult_key').Text().multiple().ifNotExists().create(); - schema.propertyKey('single_key').Text().single().ifNotExists().create(); - schema.vertexLabel('MPW1').properties('mult_key').ifNotExists().create(); - schema.vertexLabel('SW1').properties('single_key').ifNotExists().create();''', graphson) - - v = self.execute_graph('''v = graph.addVertex('MPW1') - v.property('mult_key', 'value') - v''', graphson)[0] - self.assertEqual(len(v.properties), 1) - self.assertEqual(len(v.properties['mult_key']), 1) - self.assertEqual(v.properties['mult_key'][0].label, 'mult_key') - 
self.assertEqual(v.properties['mult_key'][0].value, 'value') - - # multiple_with_two_values - v = self.execute_graph('''g.addV('MPW1').property('mult_key', 'value0').property('mult_key', 'value1')''', graphson)[0] - self.assertEqual(len(v.properties), 1) - self.assertEqual(len(v.properties['mult_key']), 2) - self.assertEqual(v.properties['mult_key'][0].label, 'mult_key') - self.assertEqual(v.properties['mult_key'][1].label, 'mult_key') - self.assertEqual(v.properties['mult_key'][0].value, 'value0') - self.assertEqual(v.properties['mult_key'][1].value, 'value1') - - # single_with_one_value - v = self.execute_graph('''v = graph.addVertex('SW1') - v.property('single_key', 'value') - v''', graphson)[0] - self.assertEqual(len(v.properties), 1) - self.assertEqual(len(v.properties['single_key']), 1) - self.assertEqual(v.properties['single_key'][0].label, 'single_key') - self.assertEqual(v.properties['single_key'][0].value, 'value') - - if DSE_VERSION < Version('6.8'): - # single_with_two_values - with self.assertRaises(InvalidRequest): - v = self.execute_graph(''' - v = graph.addVertex('SW1') - v.property('single_key', 'value0').property('single_key', 'value1').next() - v - ''', graphson)[0] - else: - # >=6.8 single_with_two_values, first one wins - v = self.execute_graph('''v = graph.addVertex('SW1') - v.property('single_key', 'value0').property('single_key', 'value1') - v''', graphson)[0] - self.assertEqual(v.properties['single_key'][0].value, 'value0') - - def _test_result_forms(self, schema, graphson): - """ - Test to validate that the different result forms are generated correctly - - Creates a simple classic graph and validates the vertex and edge result forms returned through an explicit execution profile - - @since 1.0.0 - @jira_ticket DSP-8087 - @expected_result vertex and edge results should be parsed correctly - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.classic(), graphson) - ep = self.get_execution_profile(graphson) - - results = self.resultset_to_list(self.session.execute_graph('g.V()', execution_profile=ep)) - self.assertGreater(len(results), 0, "Result set was empty; this was not expected") - for v in results: - schema.ensure_properties(self.session, v, ep) - validate_classic_vertex(self, v) - - results = self.resultset_to_list(self.session.execute_graph('g.E()', execution_profile=ep)) - self.assertGreater(len(results), 0, "Result set was empty; this was not expected") - for e in results: - schema.ensure_properties(self.session, e, ep) - validate_classic_edge(self, e) - - def _test_query_profile(self, schema, graphson): - """ - Test to validate profiling results are deserialized properly. - - @since 1.6.0 - @jira_ticket PYTHON-1057 - @expected_result TraversalMetrics and Metrics are deserialized properly - - @test_category dse graph - """ - if graphson == GraphProtocol.GRAPHSON_1_0: - raise unittest.SkipTest('skipped because there is no metrics deserializer with graphson1') - - ep = self.get_execution_profile(graphson) - results = list(self.session.execute_graph("g.V().profile()", execution_profile=ep)) - self.assertEqual(len(results), 1) - self.assertIn('metrics', results[0]) - self.assertIn('dur', results[0]) - self.assertEqual(len(results[0]['metrics']), 2) - self.assertIn('dur', results[0]['metrics'][0]) - - def _test_query_bulkset(self, schema, graphson): - """ - Test to validate bulkset results are deserialized properly.
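For callers, the deserialization asserted just below is invisible: with GraphSON 2/3 a Gremlin BulkSet comes back as a plain Python list in which each element is repeated according to its bulk count. A minimal sketch, assuming a session whose graph execution profile is already configured (the profile name here is illustrative):

    # Aggregate the 'age' property over all person vertices; the server answers
    # with a BulkSet, which the driver expands into an ordinary list.
    rows = list(session.execute_graph(
        'g.V().hasLabel("person").aggregate("x").by("age").cap("x")',
        execution_profile='graphson3'))
    ages = rows[0]  # a list of ints in which a value with bulk 2 appears twice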
- - @since 1.6.0 - @jira_ticket PYTHON-1060 - @expected_result BulkSet is deserialized properly to a list - - @test_category dse graph - """ - self.execute_graph(schema.fixtures.classic(), graphson) - ep = self.get_execution_profile(graphson) - results = list(self.session.execute_graph( - 'g.V().hasLabel("person").aggregate("x").by("age").cap("x")', - execution_profile=ep)) - self.assertEqual(len(results), 1) - results = results[0] - if type(results) is Result: - results = results.value - else: - self.assertEqual(len(results), 5) - self.assertEqual(results.count(35), 2) - - @greaterthanorequaldse68 - def _test_elementMap_query(self, schema, graphson): - """ - Test to validate that an elementMap can be serialized properly. - """ - self.execute_graph(schema.fixtures.classic(), graphson) - rs = self.execute_graph('''g.V().has('name','marko').elementMap()''', graphson) - results_list = self.resultset_to_list(rs) - self.assertEqual(len(results_list), 1) - row = results_list[0] - if graphson == GraphProtocol.GRAPHSON_3_0: - self.assertIn(T.id, row) - self.assertIn(T.label, row) - if schema is CoreGraphSchema: - self.assertEqual(row[T.id], 'dseg:/person/marko') - self.assertEqual(row[T.label], 'person') - else: - self.assertIn('id', row) - self.assertIn('label', row) - - -@GraphTestConfiguration.generate_tests(schema=ClassicGraphSchema) -class ClassicGraphQueryTest(GenericGraphQueryTest): - pass - - -@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema) -class CoreGraphQueryTest(GenericGraphQueryTest): - pass - - -@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema) -class CoreGraphQueryWithTypeWrapperTest(GraphUnitTestCase): - - def _test_basic_query_with_type_wrapper(self, schema, graphson): - """ - Test to validate that a query using a type wrapper works. - - @since 2.8.0 - @jira_ticket PYTHON-1051 - @expected_result graph query works and doesn't raise an exception - - @test_category dse graph - """ - ep = self.get_execution_profile(graphson) - vl = VertexLabel(['tupleOf(Int, Bigint)']) - schema.create_vertex_label(self.session, vl, execution_profile=ep) - - prop_name = next(iter(vl.non_pk_properties.keys())) - with self.assertRaises(InvalidRequest): - schema.add_vertex(self.session, vl, prop_name, (1, 42), execution_profile=ep) - - schema.add_vertex(self.session, vl, prop_name, (1, to_bigint(42)), execution_profile=ep) diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py deleted file mode 100644 index 66f682fd49..0000000000 --- a/tests/integration/advanced/test_adv_metadata.py +++ /dev/null @@ -1,392 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
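One note on the type-wrapper test above (CoreGraphQueryWithTypeWrapperTest): with the core graph engine a bare Python int is ambiguous between the Int and Bigint CQL types, so a value destined for a tupleOf(Int, Bigint) property has to be wrapped. A minimal sketch of the pattern; the import path follows the driver's graph module, and the traversal, label, and parameter names are illustrative:

    from cassandra.graph import to_bigint

    # (1, 42) fails server-side because 42 serializes as Int; wrapping the
    # second element tells the serializer to emit a Bigint instead.
    session.execute_graph("g.addV('v1').property('prop', prop_value)",
                          {'prop_value': (1, to_bigint(42))},
                          execution_profile='core')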
- -from packaging.version import Version - -from tests.integration import (BasicExistingKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, - BasicSharedKeyspaceUnitTestCaseRF1, - greaterthanorequaldse51, greaterthanorequaldse60, - greaterthanorequaldse68, use_single_node, - DSE_VERSION, requiredse, TestCluster) - -import unittest - -import logging -import time - - -log = logging.getLogger(__name__) - - -def setup_module(): - if DSE_VERSION: - use_single_node() - - -@requiredse -@greaterthanorequaldse60 -class FunctionAndAggregateMetadataTests(BasicSharedKeyspaceUnitTestCaseRF1): - - @classmethod - def setUpClass(cls): - if DSE_VERSION: - super(FunctionAndAggregateMetadataTests, cls).setUpClass() - - @classmethod - def tearDownClass(cls): - if DSE_VERSION: - super(FunctionAndAggregateMetadataTests, cls).tearDownClass() - - def setUp(self): - self.func_name = self.function_table_name + '_func' - self.agg_name = self.function_table_name + '_agg(int)' - - def _populated_ks_meta_attr(self, attr_name): - val, start_time = None, time.time() - while not val: - self.cluster.refresh_schema_metadata() - val = getattr(self.cluster.metadata.keyspaces[self.keyspace_name], - attr_name) - self.assertLess(time.time(), start_time + 30, - 'did not see func in metadata in 30s') - log.debug('done blocking; dict is populated: {}'.format(val)) - return val - - def test_monotonic_on_and_deterministic_function(self): - self.session.execute(""" - CREATE FUNCTION {ksn}.{ftn}(key int, val int) - RETURNS NULL ON NULL INPUT - RETURNS int - DETERMINISTIC - MONOTONIC ON val - LANGUAGE java AS 'return key+val;'; - """.format(ksn=self.keyspace_name, - ftn=self.func_name)) - fn = self._populated_ks_meta_attr('functions')[ - '{}(int,int)'.format(self.func_name) - ] - self.assertEqual(fn.monotonic_on, ['val']) - # monotonic is not set by MONOTONIC ON - self.assertFalse(fn.monotonic) - self.assertTrue(fn.deterministic) - self.assertEqual('CREATE FUNCTION {ksn}.{ftn}(key int, val int) ' - 'RETURNS NULL ON NULL INPUT ' - 'RETURNS int DETERMINISTIC MONOTONIC ON val ' - 'LANGUAGE java AS $$return key+val;$$' - ''.format(ksn=self.keyspace_name, - ftn=self.func_name), - fn.as_cql_query()) - self.session.execute('DROP FUNCTION {}.{}'.format(self.keyspace_name, - self.func_name)) - self.session.execute(fn.as_cql_query()) - - def test_monotonic_all_and_nondeterministic_function(self): - self.session.execute(""" - CREATE FUNCTION {ksn}.{ftn}(key int, val int) - RETURNS NULL ON NULL INPUT - RETURNS int - MONOTONIC - LANGUAGE java AS 'return key+val;'; - """.format(ksn=self.keyspace_name, - ftn=self.func_name)) - fn = self._populated_ks_meta_attr('functions')[ - '{}(int,int)'.format(self.func_name) - ] - self.assertEqual(set(fn.monotonic_on), {'key', 'val'}) - self.assertTrue(fn.monotonic) - self.assertFalse(fn.deterministic) - self.assertEqual('CREATE FUNCTION {ksn}.{ftn}(key int, val int) ' - 'RETURNS NULL ON NULL INPUT RETURNS int MONOTONIC ' - 'LANGUAGE java AS $$return key+val;$$' - ''.format(ksn=self.keyspace_name, - ftn=self.func_name), - fn.as_cql_query()) - self.session.execute('DROP FUNCTION {}.{}'.format(self.keyspace_name, - self.func_name)) - self.session.execute(fn.as_cql_query()) - - def _create_func_for_aggregate(self): - self.session.execute(""" - CREATE FUNCTION {ksn}.{ftn}(key int, val int) - RETURNS NULL ON NULL INPUT - RETURNS int - DETERMINISTIC - LANGUAGE java AS 'return key+val;'; - """.format(ksn=self.keyspace_name, - ftn=self.func_name)) - - def test_deterministic_aggregate(self): - 
self._create_func_for_aggregate() - self.session.execute(""" - CREATE AGGREGATE {ksn}.{an} - SFUNC {ftn} - STYPE int - INITCOND 0 - DETERMINISTIC - """.format(ksn=self.keyspace_name, - ftn=self.func_name, - an=self.agg_name)) - ag = self._populated_ks_meta_attr('aggregates')[self.agg_name] - self.assertTrue(ag.deterministic) - self.assertEqual( - 'CREATE AGGREGATE {ksn}.{an} SFUNC ' - '{ftn} STYPE int INITCOND 0 DETERMINISTIC' - ''.format(ksn=self.keyspace_name, - ftn=self.func_name, - an=self.agg_name), - ag.as_cql_query()) - self.session.execute('DROP AGGREGATE {}.{}'.format(self.keyspace_name, - self.agg_name)) - self.session.execute(ag.as_cql_query()) - - def test_nondeterministic_aggregate(self): - self._create_func_for_aggregate() - self.session.execute(""" - CREATE AGGREGATE {ksn}.{an} - SFUNC {ftn} - STYPE int - INITCOND 0 - """.format(ksn=self.keyspace_name, - ftn=self.func_name, - an=self.agg_name)) - ag = self._populated_ks_meta_attr('aggregates')[self.agg_name] - self.assertFalse(ag.deterministic) - self.assertEqual( - 'CREATE AGGREGATE {ksn}.{an} SFUNC ' - '{ftn} STYPE int INITCOND 0' - ''.format(ksn=self.keyspace_name, - ftn=self.func_name, - an=self.agg_name), - ag.as_cql_query()) - self.session.execute('DROP AGGREGATE {}.{}'.format(self.keyspace_name, - self.agg_name)) - self.session.execute(ag.as_cql_query()) - - -@requiredse -class RLACMetadataTests(BasicSharedKeyspaceUnitTestCase): - - @classmethod - def setUpClass(cls): - if DSE_VERSION: - super(RLACMetadataTests, cls).setUpClass() - - @classmethod - def tearDownClass(cls): - if DSE_VERSION: - super(RLACMetadataTests, cls).tearDownClass() - - @greaterthanorequaldse51 - def test_rlac_on_table(self): - """ - Checks to ensure that the RLAC table extension appends the proper CQL to exported tables - - @since 3.20 - @jira_ticket PYTHON-638 - @expected_result the RESTRICT ROWS statement should appear in the exported table CQL - - @test_category metadata - """ - self.session.execute("CREATE TABLE {0}.reports (" - " report_user text, " - " report_number int, " - " report_month int, " - " report_year int, " - " report_text text," - " PRIMARY KEY (report_user, report_number))".format(self.keyspace_name)) - restrict_cql = "RESTRICT ROWS ON {0}.reports USING report_user".format(self.keyspace_name) - self.session.execute(restrict_cql) - table_meta = self.cluster.metadata.keyspaces[self.keyspace_name].tables['reports'] - self.assertTrue(restrict_cql in table_meta.export_as_string()) - - @unittest.skip("DSE 5.1 doesn't support MV with RLAC; remove after update") - @greaterthanorequaldse51 - def test_rlac_on_mv(self): - """ - Checks to ensure that the RLAC table extension appends the proper CQL to exported materialized views - - @since 3.20 - @jira_ticket PYTHON-682 - @expected_result the RESTRICT ROWS statements should appear in the exported table and view CQL - - @test_category metadata - """ - self.session.execute("CREATE TABLE {0}.reports2 (" - " report_user text, " - " report_number int, " - " report_month int, " - " report_year int, " - " report_text text," - " PRIMARY KEY (report_user, report_number))".format(self.keyspace_name)) - self.session.execute("CREATE MATERIALIZED VIEW {0}.reports_by_year AS " - " SELECT report_year, report_user, report_number, report_text FROM {0}.reports2 " - " WHERE report_user IS NOT NULL AND report_number IS NOT NULL AND report_year IS NOT NULL " - " PRIMARY KEY ((report_year, report_user), report_number)".format(self.keyspace_name)) - - restrict_cql_table = "RESTRICT ROWS ON {0}.reports2 USING report_user".format(self.keyspace_name) - 
self.session.execute(restrict_cql_table) - restrict_cql_view = "RESTRICT ROWS ON {0}.reports_by_year USING report_user".format(self.keyspace_name) - self.session.execute(restrict_cql_view) - table_cql = self.cluster.metadata.keyspaces[self.keyspace_name].tables['reports2'].export_as_string() - view_cql = self.cluster.metadata.keyspaces[self.keyspace_name].tables['reports2'].views["reports_by_year"].export_as_string() - self.assertTrue(restrict_cql_table in table_cql) - self.assertTrue(restrict_cql_view in table_cql) - self.assertTrue(restrict_cql_view in view_cql) - self.assertTrue(restrict_cql_table not in view_cql) - - -@requiredse -class NodeSyncMetadataTests(BasicSharedKeyspaceUnitTestCase): - - @classmethod - def setUpClass(cls): - if DSE_VERSION: - super(NodeSyncMetadataTests, cls).setUpClass() - - @classmethod - def tearDownClass(cls): - if DSE_VERSION: - super(NodeSyncMetadataTests, cls).tearDownClass() - - @greaterthanorequaldse60 - def test_nodesync_on_table(self): - """ - Checks to ensure that nodesync is visible through driver metadata - - @since 3.20 - @jira_ticket PYTHON-799 - @expected_result nodesync should be enabled - - @test_category metadata - """ - self.session.execute("CREATE TABLE {0}.reports (" - " report_user text PRIMARY KEY" - ") WITH nodesync = {{" - "'enabled': 'true', 'deadline_target_sec' : 86400 }};".format( - self.keyspace_name - )) - table_meta = self.cluster.metadata.keyspaces[self.keyspace_name].tables['reports'] - self.assertIn('nodesync =', table_meta.export_as_string()) - self.assertIn('nodesync', table_meta.options) - - -@greaterthanorequaldse68 -class GraphMetadataTests(BasicExistingKeyspaceUnitTestCase): - """ - Various tests to ensure that graph metadata are visible through driver metadata - @since DSE6.8 - @jira_ticket PYTHON-996 - @expected_result graph metadata are fetched - @test_category metadata - """ - - @classmethod - def setUpClass(cls): - if DSE_VERSION and DSE_VERSION >= Version('6.8'): - super(GraphMetadataTests, cls).setUpClass() - cls.session.execute(""" - CREATE KEYSPACE ks_no_graph_engine WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; - """) - cls.session.execute(""" - CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} and graph_engine = 'Core'; - """ % (cls.ks_name,)) - - cls.session.execute(""" - CREATE TABLE %s.person (name text PRIMARY KEY) WITH VERTEX LABEL; - """ % (cls.ks_name,)) - - cls.session.execute(""" - CREATE TABLE %s.software(company text, name text, version int, PRIMARY KEY((company, name), version)) WITH VERTEX LABEL rocksolidsoftware; - """ % (cls.ks_name,)) - - cls.session.execute(""" - CREATE TABLE %s.contributors (contributor text, company_name text, software_name text, software_version int, - PRIMARY KEY (contributor, company_name, software_name, software_version) ) - WITH CLUSTERING ORDER BY (company_name ASC, software_name ASC, software_version ASC) - AND EDGE LABEL contrib FROM person(contributor) TO rocksolidsoftware((company_name, software_name), software_version); - """ % (cls.ks_name,)) - - @classmethod - def tearDownClass(cls): - if DSE_VERSION and DSE_VERSION >= Version('6.8'): - cls.session.execute('DROP KEYSPACE {0}'.format('ks_no_graph_engine')) - cls.session.execute('DROP KEYSPACE {0}'.format(cls.ks_name)) - cls.cluster.shutdown() - - def test_keyspace_metadata(self): - self.assertIsNone(self.cluster.metadata.keyspaces['ks_no_graph_engine'].graph_engine) - 
self.assertEqual(self.cluster.metadata.keyspaces[self.ks_name].graph_engine, 'Core') - - def test_keyspace_metadata_alter_graph_engine(self): - self.session.execute("ALTER KEYSPACE %s WITH graph_engine = 'Tinker'" % (self.ks_name,)) - self.assertEqual(self.cluster.metadata.keyspaces[self.ks_name].graph_engine, 'Tinker') - self.session.execute("ALTER KEYSPACE %s WITH graph_engine = 'Core'" % (self.ks_name,)) - self.assertEqual(self.cluster.metadata.keyspaces[self.ks_name].graph_engine, 'Core') - - def test_vertex_metadata(self): - vertex_meta = self.cluster.metadata.keyspaces[self.ks_name].tables['person'].vertex - self.assertEqual(vertex_meta.keyspace_name, self.ks_name) - self.assertEqual(vertex_meta.table_name, 'person') - self.assertEqual(vertex_meta.label_name, 'person') - - vertex_meta = self.cluster.metadata.keyspaces[self.ks_name].tables['software'].vertex - self.assertEqual(vertex_meta.keyspace_name, self.ks_name) - self.assertEqual(vertex_meta.table_name, 'software') - self.assertEqual(vertex_meta.label_name, 'rocksolidsoftware') - - def test_edge_metadata(self): - edge_meta = self.cluster.metadata.keyspaces[self.ks_name].tables['contributors'].edge - self.assertEqual(edge_meta.keyspace_name, self.ks_name) - self.assertEqual(edge_meta.table_name, 'contributors') - self.assertEqual(edge_meta.label_name, 'contrib') - self.assertEqual(edge_meta.from_table, 'person') - self.assertEqual(edge_meta.from_label, 'person') - self.assertEqual(edge_meta.from_partition_key_columns, ['contributor']) - self.assertEqual(edge_meta.from_clustering_columns, []) - self.assertEqual(edge_meta.to_table, 'software') - self.assertEqual(edge_meta.to_label, 'rocksolidsoftware') - self.assertEqual(edge_meta.to_partition_key_columns, ['company_name', 'software_name']) - self.assertEqual(edge_meta.to_clustering_columns, ['software_version']) - - -@greaterthanorequaldse68 -class GraphMetadataSchemaErrorTests(BasicExistingKeyspaceUnitTestCase): - """ - Test that we can connect when the graph schema is broken. - """ - - def test_connection_on_graph_schema_error(self): - self.session = self.cluster.connect() - - self.session.execute(""" - CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} and graph_engine = 'Core'; - """ % (self.ks_name,)) - - self.session.execute(""" - CREATE TABLE %s.person (name text PRIMARY KEY) WITH VERTEX LABEL; - """ % (self.ks_name,)) - - self.session.execute(""" - CREATE TABLE %s.software(company text, name text, version int, PRIMARY KEY((company, name), version)) WITH VERTEX LABEL rocksolidsoftware; - """ % (self.ks_name,)) - - self.session.execute(""" - CREATE TABLE %s.contributors (contributor text, company_name text, software_name text, software_version int, - PRIMARY KEY (contributor, company_name, software_name, software_version) ) - WITH CLUSTERING ORDER BY (company_name ASC, software_name ASC, software_version ASC) - AND EDGE LABEL contrib FROM person(contributor) TO rocksolidsoftware((company_name, software_name), software_version); - """ % (self.ks_name,)) - - self.session.execute('TRUNCATE system_schema.vertices') - TestCluster().connect().shutdown() diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py deleted file mode 100644 index 438d4e8018..0000000000 --- a/tests/integration/advanced/test_auth.py +++ /dev/null @@ -1,532 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import unittest -import logging -import os -import subprocess -import time - -from ccmlib.dse_cluster import DseCluster -from nose.plugins.attrib import attr -from packaging.version import Version - -from cassandra.auth import (DSEGSSAPIAuthProvider, DSEPlainTextAuthProvider, - SaslAuthProvider, TransitionalModePlainTextAuthProvider) -from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, NoHostAvailable -from cassandra.protocol import Unauthorized -from cassandra.query import SimpleStatement -from tests.integration import (get_cluster, greaterthanorequaldse51, - remove_cluster, requiredse, DSE_VERSION, TestCluster) -from tests.integration.advanced import ADS_HOME, use_single_node_with_graph -from tests.integration.advanced.graph import reset_graph, ClassicGraphFixtures - - -log = logging.getLogger(__name__) - - -def setup_module(): - if DSE_VERSION: - use_single_node_with_graph() - - -def teardown_module(): - if DSE_VERSION: - remove_cluster() # this test messes with config - - -def wait_role_manager_setup_then_execute(session, statements): - for s in statements: - exc = None - for attempt in range(3): - try: - session.execute(s) - break - except Exception as e: - exc = e - time.sleep(5) - else: # if we didn't reach `break` - if exc is not None: - raise exc - - -@attr('long') -@requiredse -class BasicDseAuthTest(unittest.TestCase): - - @classmethod - def setUpClass(self): - """ - This will setup the necessary infrastructure to run our authentication tests. It requres the ADS_HOME environment variable - and our custom embedded apache directory server jar in order to run. 
- """ - if not DSE_VERSION: - return - - clear_kerberos_tickets() - self.cluster = None - - # Setup variables for various keytab and other files - self.conf_file_dir = os.path.join(ADS_HOME, "conf/") - self.krb_conf = os.path.join(self.conf_file_dir, "krb5.conf") - self.dse_keytab = os.path.join(self.conf_file_dir, "dse.keytab") - self.dseuser_keytab = os.path.join(self.conf_file_dir, "dseuser.keytab") - self.cassandra_keytab = os.path.join(self.conf_file_dir, "cassandra.keytab") - self.bob_keytab = os.path.join(self.conf_file_dir, "bob.keytab") - self.charlie_keytab = os.path.join(self.conf_file_dir, "charlie.keytab") - actual_jar = os.path.join(ADS_HOME, "embedded-ads.jar") - - # Create configuration directories if they don't already exists - if not os.path.exists(self.conf_file_dir): - os.makedirs(self.conf_file_dir) - if not os.path.exists(actual_jar): - raise RuntimeError('could not find {}'.format(actual_jar)) - log.warning("Starting adserver") - # Start the ADS, this will create the keytab con configuration files listed above - self.proc = subprocess.Popen(['java', '-jar', actual_jar, '-k', '--confdir', self.conf_file_dir], shell=False) - time.sleep(10) - # TODO poll for server to come up - - log.warning("Starting adserver started") - ccm_cluster = get_cluster() - log.warning("fetching tickets") - # Stop cluster if running and configure it with the correct options - ccm_cluster.stop() - if isinstance(ccm_cluster, DseCluster): - # Setup kerberos options in cassandra.yaml - config_options = {'kerberos_options': {'keytab': self.dse_keytab, - 'service_principal': 'dse/_HOST@DATASTAX.COM', - 'qop': 'auth'}, - 'authentication_options': {'enabled': 'true', - 'default_scheme': 'kerberos', - 'scheme_permissions': 'true', - 'allow_digest_with_kerberos': 'true', - 'plain_text_without_ssl': 'warn', - 'transitional_mode': 'disabled'}, - 'authorization_options': {'enabled': 'true'}} - - krb5java = "-Djava.security.krb5.conf=" + self.krb_conf - # Setup dse authenticator in cassandra.yaml - ccm_cluster.set_configuration_options({ - 'authenticator': 'com.datastax.bdp.cassandra.auth.DseAuthenticator', - 'authorizer': 'com.datastax.bdp.cassandra.auth.DseAuthorizer' - }) - ccm_cluster.set_dse_configuration_options(config_options) - ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=[krb5java]) - else: - log.error("Cluster is not dse cluster test will fail") - - @classmethod - def tearDownClass(self): - """ - Terminates running ADS (Apache directory server). - """ - if not DSE_VERSION: - return - - self.proc.terminate() - - def tearDown(self): - """ - This will clear any existing kerberos tickets by using kdestroy - """ - clear_kerberos_tickets() - if self.cluster: - self.cluster.shutdown() - - def refresh_kerberos_tickets(self, keytab_file, user_name, krb_conf): - """ - Fetches a new ticket for using the keytab file and username provided. - """ - self.ads_pid = subprocess.call(['kinit', '-t', keytab_file, user_name], env={'KRB5_CONFIG': krb_conf}, shell=False) - - def connect_and_query(self, auth_provider, query=None): - """ - Runs a simple system query with the auth_provided specified. 
- """ - os.environ['KRB5_CONFIG'] = self.krb_conf - self.cluster = TestCluster(auth_provider=auth_provider) - self.session = self.cluster.connect() - query = query if query else "SELECT * FROM system.local WHERE key='local'" - statement = SimpleStatement(query) - rs = self.session.execute(statement) - return rs - - def test_should_not_authenticate_with_bad_user_ticket(self): - """ - This tests will attempt to authenticate with a user that has a valid ticket, but is not a valid dse user. - @since 3.20 - @jira_ticket PYTHON-457 - @test_category dse auth - @expected_result NoHostAvailable exception should be thrown - - """ - self.refresh_kerberos_tickets(self.dseuser_keytab, "dseuser@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"]) - self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider) - - def test_should_not_athenticate_without_ticket(self): - """ - This tests will attempt to authenticate with a user that is valid but has no ticket - @since 3.20 - @jira_ticket PYTHON-457 - @test_category dse auth - @expected_result NoHostAvailable exception should be thrown - - """ - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"]) - self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider) - - def test_connect_with_kerberos(self): - """ - This tests will attempt to authenticate with a user that is valid and has a ticket - @since 3.20 - @jira_ticket PYTHON-457 - @test_category dse auth - @expected_result Client should be able to connect and run a basic query - - """ - self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider() - rs = self.connect_and_query(auth_provider) - self.assertIsNotNone(rs) - connections = [c for holders in self.cluster.get_connection_holders() for c in holders.get_connections()] - # Check to make sure our server_authenticator class is being set appropriate - for connection in connections: - self.assertTrue('DseAuthenticator' in connection.authenticator.server_authenticator_class) - - def test_connect_with_kerberos_and_graph(self): - """ - This tests will attempt to authenticate with a user and execute a graph query - @since 3.20 - @jira_ticket PYTHON-457 - @test_category dse auth - @expected_result Client should be able to connect and run a basic graph query with authentication - - """ - self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) - - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"]) - rs = self.connect_and_query(auth_provider) - self.assertIsNotNone(rs) - reset_graph(self.session, self._testMethodName.lower()) - profiles = self.cluster.profile_manager.profiles - profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = self._testMethodName.lower() - self.session.execute_graph(ClassicGraphFixtures.classic()) - - rs = self.session.execute_graph('g.V()') - self.assertIsNotNone(rs) - - def test_connect_with_kerberos_host_not_resolved(self): - """ - This tests will attempt to authenticate with IP, this will fail on osx. - The success or failure of this test is dependent on a reverse dns lookup which can be impacted by your environment - if it fails don't panic. 
- @since 3.20 - @jira_ticket PYTHON-566 - @test_category dse auth - @expected_result Client should error when ip is used - - """ - self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) - DSEGSSAPIAuthProvider(service='dse', qops=["auth"], resolve_host_name=False) - - def test_connect_with_explicit_principal(self): - """ - This tests will attempt to authenticate using valid and invalid user principals - @since 3.20 - @jira_ticket PYTHON-574 - @test_category dse auth - @expected_result Client principals should be used by the underlying mechanism - - """ - - # Connect with valid principal - self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="cassandra@DATASTAX.COM") - self.connect_and_query(auth_provider) - connections = [c for holders in self.cluster.get_connection_holders() for c in holders.get_connections()] - - # Check to make sure our server_authenticator class is being set appropriate - for connection in connections: - self.assertTrue('DseAuthenticator' in connection.authenticator.server_authenticator_class) - - # Use invalid principal - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="notauser@DATASTAX.COM") - self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider) - - @greaterthanorequaldse51 - def test_proxy_login_with_kerberos(self): - """ - Test that the proxy login works with kerberos. - """ - # Set up users for proxy login test - self._setup_for_proxy() - - query = "select * from testkrbproxy.testproxy" - - # Try normal login with Charlie - self.refresh_kerberos_tickets(self.charlie_keytab, "charlie@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="charlie@DATASTAX.COM") - self.connect_and_query(auth_provider, query=query) - - # Try proxy login with bob - self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM", - authorization_id='charlie@DATASTAX.COM') - self.connect_and_query(auth_provider, query=query) - - # Try logging with bob without mentioning charlie - self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM") - self.assertRaises(Unauthorized, self.connect_and_query, auth_provider, query=query) - - self._remove_proxy_setup() - - @greaterthanorequaldse51 - def test_proxy_login_with_kerberos_forbidden(self): - """ - Test that the proxy login fail when proxy role is not granted - """ - # Set up users for proxy login test - self._setup_for_proxy(False) - query = "select * from testkrbproxy.testproxy" - - # Try normal login with Charlie - self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM", - authorization_id='charlie@DATASTAX.COM') - self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider, query=query) - - self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM") - self.assertRaises(Unauthorized, self.connect_and_query, auth_provider, query=query) - - self._remove_proxy_setup() - - def 
_remove_proxy_setup(self): - os.environ['KRB5_CONFIG'] = self.krb_conf - self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal='cassandra@DATASTAX.COM') - cluster = TestCluster(auth_provider=auth_provider) - session = cluster.connect() - - session.execute("REVOKE PROXY.LOGIN ON ROLE '{0}' FROM '{1}'".format('charlie@DATASTAX.COM', 'bob@DATASTAX.COM')) - - session.execute("DROP ROLE IF EXISTS '{0}';".format('bob@DATASTAX.COM')) - session.execute("DROP ROLE IF EXISTS '{0}';".format('charlie@DATASTAX.COM')) - - # Create a keyspace and allow only charlie to query it. - - session.execute("DROP KEYSPACE testkrbproxy") - - cluster.shutdown() - - def _setup_for_proxy(self, grant=True): - os.environ['KRB5_CONFIG'] = self.krb_conf - self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf) - auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal='cassandra@DATASTAX.COM') - cluster = TestCluster(auth_provider=auth_provider) - session = cluster.connect() - - stmts = [ - "CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('bob@DATASTAX.COM'), - "CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('bob@DATASTAX.COM'), - "GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES to 'bob@DATASTAX.COM'", - "CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('charlie@DATASTAX.COM'), - "GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES to 'charlie@DATASTAX.COM'", - # Create a keyspace and allow only charlie to query it. - "CREATE KEYSPACE testkrbproxy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", - "CREATE TABLE testkrbproxy.testproxy (id int PRIMARY KEY, value text)", - "GRANT ALL PERMISSIONS ON KEYSPACE testkrbproxy to '{0}'".format('charlie@DATASTAX.COM'), - ] - - if grant: - stmts.append("GRANT PROXY.LOGIN ON ROLE '{0}' to '{1}'".format('charlie@DATASTAX.COM', 'bob@DATASTAX.COM')) - - wait_role_manager_setup_then_execute(session, stmts) - - cluster.shutdown() - - -def clear_kerberos_tickets(): - subprocess.call(['kdestroy'], shell=False) - - -@attr('long') -@requiredse -class BaseDseProxyAuthTest(unittest.TestCase): - - @classmethod - def setUpClass(self): - """ - This will setup the necessary infrastructure to run unified authentication tests. 
- """ - if not DSE_VERSION or DSE_VERSION < Version('5.1'): - return - self.cluster = None - - ccm_cluster = get_cluster() - # Stop cluster if running and configure it with the correct options - ccm_cluster.stop() - if isinstance(ccm_cluster, DseCluster): - # Setup dse options in dse.yaml - config_options = {'authentication_options': {'enabled': 'true', - 'default_scheme': 'internal', - 'scheme_permissions': 'true', - 'transitional_mode': 'normal'}, - 'authorization_options': {'enabled': 'true'} - } - - # Setup dse authenticator in cassandra.yaml - ccm_cluster.set_configuration_options({ - 'authenticator': 'com.datastax.bdp.cassandra.auth.DseAuthenticator', - 'authorizer': 'com.datastax.bdp.cassandra.auth.DseAuthorizer' - }) - ccm_cluster.set_dse_configuration_options(config_options) - ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) - else: - log.error("Cluster is not dse cluster test will fail") - - # Create users and test keyspace - self.user_role = 'user1' - self.server_role = 'server' - self.root_cluster = TestCluster(auth_provider=DSEPlainTextAuthProvider('cassandra', 'cassandra')) - self.root_session = self.root_cluster.connect() - - stmts = [ - "CREATE USER {0} WITH PASSWORD '{1}'".format(self.server_role, self.server_role), - "CREATE USER {0} WITH PASSWORD '{1}'".format(self.user_role, self.user_role), - "CREATE KEYSPACE testproxy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}", - "CREATE TABLE testproxy.testproxy (id int PRIMARY KEY, value text)", - "GRANT ALL PERMISSIONS ON KEYSPACE testproxy to {0}".format(self.user_role) - ] - - wait_role_manager_setup_then_execute(self.root_session, stmts) - - @classmethod - def tearDownClass(self): - """ - Shutdown the root session. - """ - if not DSE_VERSION or DSE_VERSION < Version('5.1'): - return - self.root_session.execute('DROP KEYSPACE testproxy;') - self.root_session.execute('DROP USER {0}'.format(self.user_role)) - self.root_session.execute('DROP USER {0}'.format(self.server_role)) - self.root_cluster.shutdown() - - def tearDown(self): - """ - Shutdown the cluster and reset proxy permissions - """ - self.cluster.shutdown() - - self.root_session.execute("REVOKE PROXY.LOGIN ON ROLE {0} from {1}".format(self.user_role, self.server_role)) - self.root_session.execute("REVOKE PROXY.EXECUTE ON ROLE {0} from {1}".format(self.user_role, self.server_role)) - - def grant_proxy_login(self): - """ - Grant PROXY.LOGIN permission on a role to a specific user. - """ - self.root_session.execute("GRANT PROXY.LOGIN on role {0} to {1}".format(self.user_role, self.server_role)) - - def grant_proxy_execute(self): - """ - Grant PROXY.EXECUTE permission on a role to a specific user. - """ - self.root_session.execute("GRANT PROXY.EXECUTE on role {0} to {1}".format(self.user_role, self.server_role)) - - -@attr('long') -@greaterthanorequaldse51 -class DseProxyAuthTest(BaseDseProxyAuthTest): - """ - Tests Unified Auth. Proxy Login using SASL and Proxy Execute. 
- """ - - @classmethod - def get_sasl_options(self, mechanism='PLAIN'): - sasl_options = { - "service": 'dse', - "username": 'server', - "mechanism": mechanism, - 'password': self.server_role, - 'authorization_id': self.user_role - } - return sasl_options - - def connect_and_query(self, auth_provider, execute_as=None, query="SELECT * FROM testproxy.testproxy"): - self.cluster = TestCluster(auth_provider=auth_provider) - self.session = self.cluster.connect() - rs = self.session.execute(query, execute_as=execute_as) - return rs - - def test_proxy_login_forbidden(self): - """ - Test that a proxy login is forbidden by default for a user. - @since 3.20 - @jira_ticket PYTHON-662 - @test_category dse auth - @expected_result connect and query should not be allowed - """ - auth_provider = SaslAuthProvider(**self.get_sasl_options()) - with self.assertRaises(Unauthorized): - self.connect_and_query(auth_provider) - - def test_proxy_login_allowed(self): - """ - Test that a proxy login is allowed with proper permissions. - @since 3.20 - @jira_ticket PYTHON-662 - @test_category dse auth - @expected_result connect and query should be allowed - """ - auth_provider = SaslAuthProvider(**self.get_sasl_options()) - self.grant_proxy_login() - self.connect_and_query(auth_provider) - - def test_proxy_execute_forbidden(self): - """ - Test that a proxy execute is forbidden by default for a user. - @since 3.20 - @jira_ticket PYTHON-662 - @test_category dse auth - @expected_result connect and query should not be allowed - """ - auth_provider = DSEPlainTextAuthProvider(self.server_role, self.server_role) - with self.assertRaises(Unauthorized): - self.connect_and_query(auth_provider, execute_as=self.user_role) - - def test_proxy_execute_allowed(self): - """ - Test that a proxy execute is allowed with proper permissions. - @since 3.20 - @jira_ticket PYTHON-662 - @test_category dse auth - @expected_result connect and query should be allowed - """ - auth_provider = DSEPlainTextAuthProvider(self.server_role, self.server_role) - self.grant_proxy_execute() - self.connect_and_query(auth_provider, execute_as=self.user_role) - - def test_connection_with_transitional_mode(self): - """ - Test that the driver can connect using TransitionalModePlainTextAuthProvider - @since 3.20 - @jira_ticket PYTHON-831 - @test_category dse auth - @expected_result connect and query should be allowed - """ - auth_provider = TransitionalModePlainTextAuthProvider() - self.assertIsNotNone(self.connect_and_query(auth_provider, query="SELECT * from system.local WHERE key='local'")) diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py deleted file mode 100644 index 99de82647d..0000000000 --- a/tests/integration/advanced/test_cont_paging.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from tests.integration import use_singledc, greaterthanorequaldse51, BasicSharedKeyspaceUnitTestCaseRF3WM, \ - DSE_VERSION, ProtocolVersion, greaterthanorequaldse60, requiredse, TestCluster - -import logging -log = logging.getLogger(__name__) - -import unittest - -from itertools import cycle, count -from packaging.version import Version -import time - -from cassandra.cluster import ExecutionProfile, ContinuousPagingOptions -from cassandra.concurrent import execute_concurrent -from cassandra.query import SimpleStatement - - -def setup_module(): - if DSE_VERSION: - use_singledc() - - -@requiredse -class BaseContPagingTests(): - @classmethod - def setUpClass(cls): - if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: - return - - cls.execution_profiles = {"CONTDEFAULT": ExecutionProfile(continuous_paging_options=ContinuousPagingOptions()), - "ONEPAGE": ExecutionProfile( - continuous_paging_options=ContinuousPagingOptions(max_pages=1)), - "MANYPAGES": ExecutionProfile( - continuous_paging_options=ContinuousPagingOptions(max_pages=10)), - "BYTES": ExecutionProfile(continuous_paging_options=ContinuousPagingOptions( - page_unit=ContinuousPagingOptions.PagingUnit.BYTES)), - "SLOW": ExecutionProfile( - continuous_paging_options=ContinuousPagingOptions(max_pages_per_second=1)), } - cls.sane_eps = ["CONTDEFAULT", "BYTES"] - - @classmethod - def tearDownClass(cls): - if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: - return - - @classmethod - def create_cluster(cls): - - cls.cluster_with_profiles = TestCluster(protocol_version=cls.protocol_version, execution_profiles=cls.execution_profiles) - - cls.session_with_profiles = cls.cluster_with_profiles.connect(wait_for_all_pools=True) - statements_and_params = zip( - cycle(["INSERT INTO " + cls.ks_name + "." + cls.ks_name + " (k, v) VALUES (%s, 0)"]), - [(i,) for i in range(150)]) - execute_concurrent(cls.session_with_profiles, list(statements_and_params)) - - cls.select_all_statement = "SELECT * FROM {0}.{0}".format(cls.ks_name) - - def test_continuous_paging(self): - """ - Test to ensure that various continuous paging schemes return the full set of results. - @since 3.20 - @jira_ticket PYTHON-615 - @expected_result various continuous paging options should fetch all the results - - @test_category queries - """ - for ep in self.execution_profiles.keys(): - results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile=ep)) - self.assertEqual(len(results), 150) - - - def test_page_fetch_size(self): - """ - Test to ensure that continuous paging works appropriately with fetch size.
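The execution profiles built in setUpClass are essentially the whole continuous-paging API surface: the options object rides on a profile and the query itself is unchanged. A minimal standalone sketch, assuming a DSE cluster; the keyspace, table, and profile names are illustrative:

    from cassandra import ProtocolVersion
    from cassandra.cluster import Cluster, ExecutionProfile, ContinuousPagingOptions

    throttled = ExecutionProfile(
        continuous_paging_options=ContinuousPagingOptions(max_pages_per_second=1))
    cluster = Cluster(protocol_version=ProtocolVersion.DSE_V1,
                      execution_profiles={'throttled': throttled})
    session = cluster.connect()
    # The server streams pages continuously; the driver queues them and the
    # iteration below consumes every row.
    rows = list(session.execute('SELECT * FROM ks.tbl', execution_profile='throttled'))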
- @since 3.20 - @jira_ticket PYTHON-615 - @expected_result continuous paging options should work sensibly with various fetch size - - @test_category queries - """ - - # Since we fetch one page at a time results should match fetch size - for fetch_size in (2, 3, 7, 10, 99, 100, 101, 150): - self.session_with_profiles.default_fetch_size = fetch_size - results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile= "ONEPAGE")) - self.assertEqual(len(results), fetch_size) - - # Since we fetch ten pages at a time results should match fetch size * 10 - for fetch_size in (2, 3, 7, 10, 15): - self.session_with_profiles.default_fetch_size = fetch_size - results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile= "MANYPAGES")) - self.assertEqual(len(results), fetch_size*10) - - # Default settings for continuous paging should be able to fetch all results regardless of fetch size - # Changing the units should, not affect the number of results, if max_pages is not set - for profile in self.sane_eps: - for fetch_size in (2, 3, 7, 10, 15): - self.session_with_profiles.default_fetch_size = fetch_size - results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile= profile)) - self.assertEqual(len(results), 150) - - # This should take around 3 seconds to fetch but should still complete with all results - self.session_with_profiles.default_fetch_size = 50 - results = list(self.session_with_profiles.execute(self.select_all_statement, execution_profile= "SLOW")) - self.assertEqual(len(results), 150) - - def test_paging_cancel(self): - """ - Test to ensure we can cancel a continuous paging session once it's started - @since 3.20 - @jira_ticket PYTHON-615 - @expected_result This query should be canceled before any sizable amount of results can be returned - @test_category queries - """ - - self.session_with_profiles.default_fetch_size = 1 - # This combination should fetch one result a second. 
We should see a very few results - results = self.session_with_profiles.execute_async(self.select_all_statement, execution_profile= "SLOW") - result_set =results.result() - result_set.cancel_continuous_paging() - result_lst =list(result_set) - self.assertLess(len(result_lst), 2, "Cancel should have aborted fetch immediately") - - def test_con_paging_verify_writes(self): - """ - Test to validate results with a few continuous paging options - @since 3.20 - @jira_ticket PYTHON-615 - @expected_result all results should be returned correctly - @test_category queries - """ - prepared = self.session_with_profiles.prepare(self.select_all_statement) - - - for ep in self.sane_eps: - for fetch_size in (2, 3, 7, 10, 99, 100, 101, 10000): - self.session_with_profiles.default_fetch_size = fetch_size - results = self.session_with_profiles.execute(self.select_all_statement, execution_profile=ep) - result_array = set() - result_set = set() - for result in results: - result_array.add(result.k) - result_set.add(result.v) - - self.assertEqual(set(range(150)), result_array) - self.assertEqual(set([0]), result_set) - - statement = SimpleStatement(self.select_all_statement) - results = self.session_with_profiles.execute(statement, execution_profile=ep) - result_array = set() - result_set = set() - for result in results: - result_array.add(result.k) - result_set.add(result.v) - - self.assertEqual(set(range(150)), result_array) - self.assertEqual(set([0]), result_set) - - results = self.session_with_profiles.execute(prepared, execution_profile=ep) - result_array = set() - result_set = set() - for result in results: - result_array.add(result.k) - result_set.add(result.v) - - self.assertEqual(set(range(150)), result_array) - self.assertEqual(set([0]), result_set) - - def test_can_get_results_when_no_more_pages(self): - """ - Test to validate that the resutls can be fetched when - has_more_pages is False - @since 3.20 - @jira_ticket PYTHON-946 - @expected_result the results can be fetched - @test_category queries - """ - generator_expanded = [] - def get_all_rows(generator, future, generator_expanded): - self.assertFalse(future.has_more_pages) - - generator_expanded.extend(list(generator)) - print("Setting generator_expanded to True") - - future = self.session_with_profiles.execute_async("SELECT * from system.local LIMIT 10", - execution_profile="CONTDEFAULT") - future.add_callback(get_all_rows, future, generator_expanded) - time.sleep(5) - self.assertTrue(generator_expanded) - - -@requiredse -@greaterthanorequaldse51 -class ContPagingTestsDSEV1(BaseContPagingTests, BasicSharedKeyspaceUnitTestCaseRF3WM): - @classmethod - def setUpClass(cls): - cls.required_dse_version = BaseContPagingTests.required_dse_version = Version('5.1') - if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: - return - - BasicSharedKeyspaceUnitTestCaseRF3WM.setUpClass() - BaseContPagingTests.setUpClass() - - cls.protocol_version = ProtocolVersion.DSE_V1 - cls.create_cluster() - - -@requiredse -@greaterthanorequaldse60 -class ContPagingTestsDSEV2(BaseContPagingTests, BasicSharedKeyspaceUnitTestCaseRF3WM): - @classmethod - def setUpClass(cls): - cls.required_dse_version = BaseContPagingTests.required_dse_version = Version('6.0') - if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: - return - - BasicSharedKeyspaceUnitTestCaseRF3WM.setUpClass() - BaseContPagingTests.setUpClass() - - more_profiles = { - "SMALL_QUEUE": ExecutionProfile(continuous_paging_options=ContinuousPagingOptions(max_queue_size=2)), - "BIG_QUEUE": 
ExecutionProfile(continuous_paging_options=ContinuousPagingOptions(max_queue_size=400)) - } - cls.sane_eps += ["SMALL_QUEUE", "BIG_QUEUE"] - cls.execution_profiles.update(more_profiles) - - cls.protocol_version = ProtocolVersion.DSE_V2 - cls.create_cluster() diff --git a/tests/integration/advanced/test_cqlengine_where_operators.py b/tests/integration/advanced/test_cqlengine_where_operators.py deleted file mode 100644 index b2e4d4ba9e..0000000000 --- a/tests/integration/advanced/test_cqlengine_where_operators.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import os -import time - -from cassandra.cqlengine import columns, connection, models -from cassandra.cqlengine.management import (CQLENG_ALLOW_SCHEMA_MANAGEMENT, - create_keyspace_simple, drop_table, - sync_table) -from cassandra.cqlengine.statements import IsNotNull -from tests.integration import DSE_VERSION, requiredse, CASSANDRA_IP, greaterthanorequaldse60, TestCluster -from tests.integration.advanced import use_single_node_with_graph_and_solr -from tests.integration.cqlengine import DEFAULT_KEYSPACE - - -class SimpleNullableModel(models.Model): - __keyspace__ = DEFAULT_KEYSPACE - partition = columns.Integer(primary_key=True) - nullable = columns.Integer(required=False) - # nullable = columns.Integer(required=False, custom_index=True) - - -def setup_module(): - if DSE_VERSION: - os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' - use_single_node_with_graph_and_solr() - setup_connection(DEFAULT_KEYSPACE) - create_keyspace_simple(DEFAULT_KEYSPACE, 1) - sync_table(SimpleNullableModel) - - -def setup_connection(keyspace_name): - connection.setup([CASSANDRA_IP], - # consistency=ConsistencyLevel.ONE, - # protocol_version=PROTOCOL_VERSION, - default_keyspace=keyspace_name) - - -def teardown_module(): - if DSE_VERSION: - drop_table(SimpleNullableModel) - - -@requiredse -class IsNotNullTests(unittest.TestCase): - - @classmethod - def setUpClass(cls): - if DSE_VERSION: - cls.cluster = TestCluster() - - @greaterthanorequaldse60 - def test_is_not_null_execution(self): - """ - Verify that CQL statements have correct syntax when executed - If we wanted them to return something meaningful and not a InvalidRequest - we'd have to create an index in search for the column we are using - IsNotNull - - @since 3.20 - @jira_ticket PYTHON-968 - @expected_result InvalidRequest is arisen - - @test_category cqlengine - """ - cluster = TestCluster() - self.addCleanup(cluster.shutdown) - session = cluster.connect() - - SimpleNullableModel.create(partition=1, nullable=2) - SimpleNullableModel.create(partition=2, nullable=None) - - self.addCleanup(session.execute, "DROP SEARCH INDEX ON {}".format( - SimpleNullableModel.column_family_name())) - create_index_stmt = ( - "CREATE SEARCH INDEX ON {} WITH COLUMNS nullable " - "".format(SimpleNullableModel.column_family_name())) - session.execute(create_index_stmt) - - SimpleNullableModel.create(partition=1, nullable=1) - 
SimpleNullableModel.create(partition=2, nullable=None) - - # TODO: block on indexing more precisely - time.sleep(5) - - self.assertEqual(len(list(SimpleNullableModel.objects.all())), 2) - self.assertEqual( - len(list( - SimpleNullableModel.filter(IsNotNull("nullable"), partition__eq=2) - )), - 0) - self.assertEqual( - len(list( - SimpleNullableModel.filter(IsNotNull("nullable"), partition__eq=1) - )), - 1) diff --git a/tests/integration/advanced/test_geometry.py b/tests/integration/advanced/test_geometry.py deleted file mode 100644 index 6a6737bd50..0000000000 --- a/tests/integration/advanced/test_geometry.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from tests.integration import DSE_VERSION, requiredse -from tests.integration.advanced import BasicGeometricUnitTestCase, use_single_node_with_graph -from cassandra.util import OrderedMap, sortedset -from collections import namedtuple - -import unittest -from uuid import uuid1 -from cassandra.util import Point, LineString, Polygon -from cassandra.cqltypes import LineStringType, PointType, PolygonType - - -def setup_module(): - if DSE_VERSION: - use_single_node_with_graph() - - -class AbstractGeometricTypeTest(): - - original_value = "" - - def test_should_insert_simple(self): - """ - This tests will attempt to insert a point, polygon, or line, using simple inline formating. - @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried. - """ - uuid_key = uuid1() - self.session.execute("INSERT INTO tbl (k, g) VALUES (%s, %s)", [uuid_key, self.original_value]) - self.validate('g', uuid_key, self.original_value) - - def test_should_insert_simple_prepared(self): - """ - This tests will attempt to insert a point, polygon, or line, using prepared statements. - @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried. - """ - uuid_key = uuid1() - prepared = self.session.prepare("INSERT INTO tbl (k, g) VALUES (?, ?)") - self.session.execute(prepared, (uuid_key, self.original_value)) - self.validate('g', uuid_key, self.original_value) - - def test_should_insert_simple_prepared_with_bound(self): - """ - This tests will attempt to insert a point, polygon, or line, using prepared statements and bind. - @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried. - """ - uuid_key = uuid1() - prepared = self.session.prepare("INSERT INTO tbl (k, g) VALUES (?, ?)") - bound_statement = prepared.bind((uuid_key, self.original_value)) - self.session.execute(bound_statement) - self.validate('g', uuid_key, self.original_value) - - def test_should_insert_as_list(self): - """ - This tests will attempt to insert a point, polygon, or line, as values of list. 
- @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried as a list. - """ - uuid_key = uuid1() - prepared = self.session.prepare("INSERT INTO tbl (k, l) VALUES (?, ?)") - bound_statement = prepared.bind((uuid_key, [self.original_value])) - self.session.execute(bound_statement) - self.validate('l', uuid_key, [self.original_value]) - - def test_should_insert_as_set(self): - """ - This tests will attempt to insert a point, polygon, or line, as values of set. - @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried as a set. - """ - uuid_key = uuid1() - prepared = self.session.prepare("INSERT INTO tbl (k, s) VALUES (?, ?)") - bound_statement = prepared.bind((uuid_key, sortedset([self.original_value]))) - self.session.execute(bound_statement) - self.validate('s', uuid_key, sortedset([self.original_value])) - - def test_should_insert_as_map_keys(self): - """ - This tests will attempt to insert a point, polygon, or line, as keys of a map. - @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried as keys of a map. - """ - uuid_key = uuid1() - prepared = self.session.prepare("INSERT INTO tbl (k, m0) VALUES (?, ?)") - bound_statement = prepared.bind((uuid_key, OrderedMap(zip([self.original_value], [1])))) - self.session.execute(bound_statement) - self.validate('m0', uuid_key, OrderedMap(zip([self.original_value], [1]))) - - def test_should_insert_as_map_values(self): - """ - This tests will attempt to insert a point, polygon, or line, as values of a map. - @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried as values of a map. - """ - uuid_key = uuid1() - prepared = self.session.prepare("INSERT INTO tbl (k, m1) VALUES (?, ?)") - bound_statement = prepared.bind((uuid_key, OrderedMap(zip([1], [self.original_value])))) - self.session.execute(bound_statement) - self.validate('m1', uuid_key, OrderedMap(zip([1], [self.original_value]))) - - def test_should_insert_as_tuple(self): - """ - This tests will attempt to insert a point, polygon, or line, as values of a tuple. - @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried as values of a tuple. - """ - uuid_key = uuid1() - prepared = self.session.prepare("INSERT INTO tbl (k, t) VALUES (?, ?)") - bound_statement = prepared.bind((uuid_key, (self.original_value, self.original_value, self.original_value))) - self.session.execute(bound_statement) - self.validate('t', uuid_key, (self.original_value, self.original_value, self.original_value)) - - def test_should_insert_as_udt(self): - """ - This tests will attempt to insert a point, polygon, or line, as members of a udt. - @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried as members of a udt. 
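Since every variant above reduces to binding a geometry object like any other CQL value, one end-to-end sketch covers the family. It assumes a table along the lines of the fixture these tests use, roughly CREATE TABLE ks.tbl (k uuid PRIMARY KEY, g 'PointType'):

    from uuid import uuid1
    from cassandra.cluster import Cluster
    from cassandra.util import Point

    session = Cluster().connect('ks')

    # Simple %s-style binding...
    session.execute("INSERT INTO tbl (k, g) VALUES (%s, %s)", [uuid1(), Point(.5, .13)])

    # ...and prepared-statement binding behave identically.
    prepared = session.prepare("INSERT INTO tbl (k, g) VALUES (?, ?)")
    session.execute(prepared.bind((uuid1(), Point(1, 2))))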
- """ - UDT1 = namedtuple('udt1', ('g')) - self.cluster.register_user_type(self.ks_name, 'udt1', UDT1) - uuid_key = uuid1() - prepared = self.session.prepare("INSERT INTO tbl (k, u) values (?, ?)") - bound_statement = prepared.bind((uuid_key, UDT1(self.original_value))) - self.session.execute(bound_statement) - rs = self.session.execute("SELECT {0} from {1} where k={2}".format('u', 'tbl', uuid_key)) - retrieved_udt = rs[0]._asdict()['u'] - - self.assertEqual(retrieved_udt.g, self.original_value) - - def test_should_accept_as_partition_key(self): - """ - This tests will attempt to insert a point, polygon, or line, as a partition key. - @since 3.20 - @jira_ticket PYTHON-456 - @test_category dse geometric - @expected_result geometric types should be able to be inserted and queried as a partition key. - """ - prepared = self.session.prepare("INSERT INTO tblpk (k, v) VALUES (?, ?)") - bound_statement = prepared.bind((self.original_value, 1)) - self.session.execute(bound_statement) - rs = self.session.execute("SELECT k, v FROM tblpk") - foundpk = rs[0]._asdict()['k'] - self.assertEqual(foundpk, self.original_value) - - def validate(self, value, key, expected): - """ - Simple utility method used for validation of inserted types. - """ - rs = self.session.execute("SELECT {0} from tbl where k={1}".format(value, key)) - retrieved = rs[0]._asdict()[value] - self.assertEqual(expected, retrieved) - - def test_insert_empty_with_string(self): - """ - This tests will attempt to insert a point, polygon, or line, as Empty - @since 3.20 - @jira_ticket PYTHON-481 - @test_category dse geometric - @expected_result EMPTY as a keyword should be honored - """ - uuid_key = uuid1() - self.session.execute("INSERT INTO tbl (k, g) VALUES (%s, %s)", [uuid_key, self.empty_statement]) - self.validate('g', uuid_key, self.empty_value) - - def test_insert_empty_with_object(self): - """ - This tests will attempt to insert a point, polygon, or line, as Empty - @since 3.20 - @jira_ticket PYTHON-481 - @test_category dse geometric - @expected_result EMPTY as a keyword should be used with empty objects - """ - uuid_key = uuid1() - prepared = self.session.prepare("INSERT INTO tbl (k, g) VALUES (?, ?)") - self.session.execute(prepared, (uuid_key, self.empty_value)) - self.validate('g', uuid_key, self.empty_value) - - -@requiredse -class BasicGeometricPointTypeTest(AbstractGeometricTypeTest, BasicGeometricUnitTestCase): - """ - Runs all the geometric tests against PointType - """ - cql_type_name = "'{0}'".format(PointType.typename) - original_value = Point(.5, .13) - - @unittest.skip("Empty String") - def test_insert_empty_with_string(self): - pass - - @unittest.skip("Empty String") - def test_insert_empty_with_object(self): - pass - - -@requiredse -class BasicGeometricLineStringTypeTest(AbstractGeometricTypeTest, BasicGeometricUnitTestCase): - """ - Runs all the geometric tests against LineStringType - """ - cql_type_name = cql_type_name = "'{0}'".format(LineStringType.typename) - original_value = LineString(((1, 2), (3, 4), (9871234, 1235487215))) - empty_statement = 'LINESTRING EMPTY' - empty_value = LineString() - - -@requiredse -class BasicGeometricPolygonTypeTest(AbstractGeometricTypeTest, BasicGeometricUnitTestCase): - """ - Runs all the geometric tests against PolygonType - """ - cql_type_name = cql_type_name = "'{0}'".format(PolygonType.typename) - original_value = Polygon([(10.0, 10.0), (110.0, 10.0), (110., 110.0), (10., 110.0), (10., 10.0)], [[(20., 20.0), (20., 30.0), (30., 30.0), (30., 20.0), (20., 20.0)], [(40., 20.0), 
diff --git a/tests/integration/advanced/test_spark.py b/tests/integration/advanced/test_spark.py deleted file mode 100644 index a307913abb..0000000000 --- a/tests/integration/advanced/test_spark.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from cassandra.cluster import EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT -from cassandra.graph import SimpleGraphStatement -from tests.integration import DSE_VERSION, requiredse -from tests.integration.advanced import use_singledc_wth_graph_and_spark, find_spark_master -from tests.integration.advanced.graph import BasicGraphUnitTestCase, ClassicGraphFixtures -log = logging.getLogger(__name__) - - -def setup_module(): - if DSE_VERSION: - use_singledc_wth_graph_and_spark() - - -@requiredse -class SparkLBTests(BasicGraphUnitTestCase): - """ - Test to validate that analytics queries can run in a multi-node environment. Also check to ensure - that the master spark node is correctly targeted when OLAP queries are run - - @since 3.20 - @jira_ticket PYTHON-510 - @expected_result OLAP results should come back correctly, master spark coordinator should always be picked. - @test_category dse graph - """ - def test_spark_analytic_query(self): - self.session.execute_graph(ClassicGraphFixtures.classic()) - spark_master = find_spark_master(self.session) - - # Run multiple times to ensure we don't round robin - for i in range(3): - to_run = SimpleGraphStatement("g.V().count()") - rs = self.session.execute_graph(to_run, execution_profile=EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT) - self.assertEqual(rs[0].value, 7) - self.assertEqual(rs.response_future._current_host.address, spark_master) diff --git a/tests/integration/cloud/__init__.py b/tests/integration/cloud/__init__.py deleted file mode 100644 index a6a4ab7a5d..0000000000 --- a/tests/integration/cloud/__init__.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License -from cassandra.cluster import Cluster - -import unittest - -import os -import subprocess - -from tests.integration import CLOUD_PROXY_PATH, USE_CASS_EXTERNAL - - -def setup_package(): - if CLOUD_PROXY_PATH and not USE_CASS_EXTERNAL: - start_cloud_proxy() - - -def teardown_package(): - if not USE_CASS_EXTERNAL: - stop_cloud_proxy() - - -class CloudProxyCluster(unittest.TestCase): - - creds_dir = os.path.join(os.path.abspath(CLOUD_PROXY_PATH or ''), 'certs/bundles/') - creds = os.path.join(creds_dir, 'creds-v1.zip') - creds_no_auth = os.path.join(creds_dir, 'creds-v1-wo-creds.zip') - creds_unreachable = os.path.join(creds_dir, 'creds-v1-unreachable.zip') - creds_invalid_ca = os.path.join(creds_dir, 'creds-v1-invalid-ca.zip') - - cluster, connect = None, False - session = None - - @classmethod - def connect(cls, creds, **kwargs): - cloud_config = { - 'secure_connect_bundle': creds, - } - cls.cluster = Cluster(cloud=cloud_config, protocol_version=4, **kwargs) - cls.session = cls.cluster.connect(wait_for_all_pools=True) - - def tearDown(self): - if self.cluster: - self.cluster.shutdown() - - -class CloudProxyServer(object): - """ - Class for starting and stopping the proxy (sni_single_endpoint) - """ - - ccm_command = 'docker exec $(docker ps -a -q --filter ancestor=single_endpoint) ccm {}' - - def __init__(self, CLOUD_PROXY_PATH): - self.CLOUD_PROXY_PATH = CLOUD_PROXY_PATH - self.running = False - - def start(self): - return_code = subprocess.call( - ['REQUIRE_CLIENT_CERTIFICATE=true ./run.sh'], - cwd=self.CLOUD_PROXY_PATH, - shell=True) - if return_code != 0: - raise Exception("Error while starting proxy server") - self.running = True - - def stop(self): - if self.is_running(): - subprocess.call( - ["docker kill $(docker ps -a -q --filter ancestor=single_endpoint)"], - shell=True) - self.running = False - - def is_running(self): - return self.running - - def start_node(self, id): - subcommand = 'node{} start --jvm_arg "-Ddse.product_type=DATASTAX_APOLLO" --root --wait-for-binary-proto'.format(id) - subprocess.call( - [self.ccm_command.format(subcommand)], - shell=True) - - def stop_node(self, id): - subcommand = 'node{} stop'.format(id) - subprocess.call( - [self.ccm_command.format(subcommand)], - shell=True) - - -CLOUD_PROXY_SERVER = CloudProxyServer(CLOUD_PROXY_PATH) - - -def start_cloud_proxy(): - """ - Starts and waits for the proxy to run - """ - CLOUD_PROXY_SERVER.stop() - CLOUD_PROXY_SERVER.start() - - -def stop_cloud_proxy(): - CLOUD_PROXY_SERVER.stop() diff --git a/tests/integration/cloud/conftest.py b/tests/integration/cloud/conftest.py deleted file mode 100644 index fb08b04194..0000000000 --- a/tests/integration/cloud/conftest.py +++ /dev/null @@ -1,9 +0,0 @@ -import pytest - -from tests.integration.cloud import setup_package, teardown_package - -@pytest.fixture(scope='session', autouse=True) -def setup_and_teardown_packages(): - setup_package() - yield - teardown_package() \ No newline at end of file diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py deleted file mode 100644 index 514314d81e..0000000000 --- a/tests/integration/cloud/test_cloud.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License -from cassandra.datastax.cloud import parse_metadata_info -from cassandra.query import SimpleStatement -from cassandra.cqlengine import connection -from cassandra.cqlengine.management import sync_table, create_keyspace_simple -from cassandra.cqlengine.models import Model -from cassandra.cqlengine import columns -from cassandra import DriverException, ConsistencyLevel, InvalidRequest -from cassandra.cluster import NoHostAvailable, ExecutionProfile, Cluster, _execution_profile_to_string -from cassandra.connection import SniEndPoint -from cassandra.auth import PlainTextAuthProvider -from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy, ConstantReconnectionPolicy - -from ssl import SSLContext, PROTOCOL_TLS -from unittest.mock import patch - -from tests.integration import requirescloudproxy -from tests.util import wait_until_not_raised -from tests.integration.cloud import CloudProxyCluster, CLOUD_PROXY_SERVER - -DISALLOWED_CONSISTENCIES = [ - ConsistencyLevel.ANY, - ConsistencyLevel.ONE, - ConsistencyLevel.LOCAL_ONE -] - - -@requirescloudproxy -class CloudTests(CloudProxyCluster): - def hosts_up(self): - return [h for h in self.cluster.metadata.all_hosts() if h.is_up] - - def test_resolve_and_connect(self): - self.connect(self.creds) - - self.assertEqual(len(self.hosts_up()), 3) - for host in self.cluster.metadata.all_hosts(): - self.assertTrue(host.is_up) - self.assertIsInstance(host.endpoint, SniEndPoint) - self.assertEqual(str(host.endpoint), "{}:{}:{}".format( - host.endpoint.address, host.endpoint.port, host.host_id)) - self.assertIn(host.endpoint._resolved_address, ("127.0.0.1", '::1')) - - def test_match_system_local(self): - self.connect(self.creds) - - self.assertEqual(len(self.hosts_up()), 3) - for host in self.cluster.metadata.all_hosts(): - row = self.session.execute("SELECT * FROM system.local WHERE key='local'", host=host).one() - self.assertEqual(row.host_id, host.host_id) - self.assertEqual(row.rpc_address, host.broadcast_rpc_address) - - def test_set_auth_provider(self): - self.connect(self.creds) - self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider) - self.assertEqual(self.cluster.auth_provider.username, 'user1') - self.assertEqual(self.cluster.auth_provider.password, 'user1') - - def test_support_leaving_the_auth_unset(self): - with self.assertRaises(NoHostAvailable): - self.connect(self.creds_no_auth) - self.assertIsNone(self.cluster.auth_provider) - - def test_support_overriding_auth_provider(self): - try: - self.connect(self.creds, auth_provider=PlainTextAuthProvider('invalid', 'invalid')) - except: - pass # this will fail soon when sni_single_endpoint is updated - self.assertIsInstance(self.cluster.auth_provider, PlainTextAuthProvider) - self.assertEqual(self.cluster.auth_provider.username, 'invalid') - self.assertEqual(self.cluster.auth_provider.password, 'invalid') - - def test_error_overriding_ssl_context(self): - with self.assertRaises(ValueError) as cm: - self.connect(self.creds, ssl_context=SSLContext(PROTOCOL_TLS)) - - self.assertIn('cannot be specified with a cloud configuration', str(cm.exception)) - - def 
test_error_overriding_ssl_options(self): - with self.assertRaises(ValueError) as cm: - self.connect(self.creds, ssl_options={'check_hostname': True}) - - self.assertIn('cannot be specified with a cloud configuration', str(cm.exception)) - - def _bad_hostname_metadata(self, config, http_data): - config = parse_metadata_info(config, http_data) - config.sni_host = "127.0.0.1" - return config - - def test_verify_hostname(self): - with patch('cassandra.datastax.cloud.parse_metadata_info', wraps=self._bad_hostname_metadata): - with self.assertRaises(NoHostAvailable) as e: - self.connect(self.creds) - self.assertIn("hostname", str(e.exception).lower()) - - def test_error_when_bundle_doesnt_exist(self): - try: - self.connect('/invalid/path/file.zip') - except Exception as e: - self.assertIsInstance(e, FileNotFoundError) - - def test_load_balancing_policy_is_dcawaretokenlbp(self): - self.connect(self.creds) - self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy, - TokenAwarePolicy) - self.assertIsInstance(self.cluster.profile_manager.default.load_balancing_policy._child_policy, - DCAwareRoundRobinPolicy) - - def test_resolve_and_reconnect_on_node_down(self): - - self.connect(self.creds, - idle_heartbeat_interval=1, idle_heartbeat_timeout=1, - reconnection_policy=ConstantReconnectionPolicy(120)) - - self.assertEqual(len(self.hosts_up()), 3) - CLOUD_PROXY_SERVER.stop_node(1) - wait_until_not_raised( - lambda: self.assertEqual(len(self.hosts_up()), 2), - 0.02, 250) - - host = [h for h in self.cluster.metadata.all_hosts() if not h.is_up][0] - with patch.object(SniEndPoint, "resolve", wraps=host.endpoint.resolve) as mocked_resolve: - CLOUD_PROXY_SERVER.start_node(1) - wait_until_not_raised( - lambda: self.assertEqual(len(self.hosts_up()), 3), - 0.02, 250) - mocked_resolve.assert_called() - - def test_metadata_unreachable(self): - with self.assertRaises(DriverException) as cm: - self.connect(self.creds_unreachable, connect_timeout=1) - - self.assertIn('Unable to connect to the metadata service', str(cm.exception)) - - def test_metadata_ssl_error(self): - with self.assertRaises(DriverException) as cm: - self.connect(self.creds_invalid_ca) - - self.assertIn('Unable to connect to the metadata', str(cm.exception)) - - def test_default_consistency(self): - self.connect(self.creds) - self.assertEqual(self.session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM) - # Verify EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT, - # EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT - for ep_key in self.cluster.profile_manager.profiles.keys(): - ep = self.cluster.profile_manager.profiles[ep_key] - self.assertEqual( - ep.consistency_level, - ConsistencyLevel.LOCAL_QUORUM, - "Expecting LOCAL QUORUM for profile {}, but got {} instead".format( - _execution_profile_to_string(ep_key), ConsistencyLevel.value_to_name[ep.consistency_level] - )) - - def test_default_consistency_of_execution_profiles(self): - cloud_config = {'secure_connect_bundle': self.creds} - self.cluster = Cluster(cloud=cloud_config, protocol_version=4, execution_profiles={ - 'pre_create_default_ep': ExecutionProfile(), - 'pre_create_changed_ep': ExecutionProfile( - consistency_level=ConsistencyLevel.LOCAL_ONE, - ), - }) - self.cluster.add_execution_profile('pre_connect_default_ep', ExecutionProfile()) - self.cluster.add_execution_profile( - 'pre_connect_changed_ep', - ExecutionProfile( - consistency_level=ConsistencyLevel.LOCAL_ONE, - ) - ) - session = self.cluster.connect(wait_for_all_pools=True) - 
- self.cluster.add_execution_profile('post_connect_default_ep', ExecutionProfile()) - self.cluster.add_execution_profile( - 'post_connect_changed_ep', - ExecutionProfile( - consistency_level=ConsistencyLevel.LOCAL_ONE, - ) - ) - - for default in ['pre_create_default_ep', 'pre_connect_default_ep', 'post_connect_default_ep']: - cl = self.cluster.profile_manager.profiles[default].consistency_level - self.assertEqual( - cl, ConsistencyLevel.LOCAL_QUORUM, - "Expecting LOCAL QUORUM for profile {}, but got {} instead".format(default, cl) - ) - for changed in ['pre_create_changed_ep', 'pre_connect_changed_ep', 'post_connect_changed_ep']: - cl = self.cluster.profile_manager.profiles[changed].consistency_level - self.assertEqual( - cl, ConsistencyLevel.LOCAL_ONE, - "Expecting LOCAL ONE for profile {}, but got {} instead".format(changed, cl) - ) - - def test_consistency_guardrails(self): - self.connect(self.creds) - self.session.execute( - "CREATE KEYSPACE IF NOT EXISTS test_consistency_guardrails " - "with replication={'class': 'SimpleStrategy', 'replication_factor': 1}" - ) - self.session.execute("CREATE TABLE IF NOT EXISTS test_consistency_guardrails.guardrails (id int primary key)") - for consistency in DISALLOWED_CONSISTENCIES: - statement = SimpleStatement( - "INSERT INTO test_consistency_guardrails.guardrails (id) values (1)", - consistency_level=consistency - ) - with self.assertRaises(InvalidRequest) as e: - self.session.execute(statement) - self.assertIn('not allowed for Write Consistency Level', str(e.exception)) - - # Sanity check to make sure we can do a normal insert - statement = SimpleStatement( - "INSERT INTO test_consistency_guardrails.guardrails (id) values (1)", - consistency_level=ConsistencyLevel.LOCAL_QUORUM - ) - try: - self.session.execute(statement) - except InvalidRequest: - self.fail("InvalidRequest was incorrectly raised for write query at LOCAL QUORUM!") - - def test_cqlengine_can_connect(self): - class TestModel(Model): - id = columns.Integer(primary_key=True) - val = columns.Text() - - connection.setup(None, "test", cloud={'secure_connect_bundle': self.creds}) - create_keyspace_simple('test', 1) - sync_table(TestModel) - TestModel.objects.create(id=42, val='test') - self.assertEqual(len(TestModel.objects.all()), 1)
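test_cqlengine_can_connect above is the one place this cloud suite exercised cqlengine end to end. A condensed sketch of that flow, assuming a valid secure connect bundle on disk; the bundle path is illustrative:

```python
from cassandra.cqlengine import columns, connection
from cassandra.cqlengine.management import create_keyspace_simple, sync_table
from cassandra.cqlengine.models import Model


class TestModel(Model):
    id = columns.Integer(primary_key=True)
    val = columns.Text()


# hosts=None: contact points come from the bundle's metadata service.
connection.setup(None, "test", cloud={'secure_connect_bundle': '/path/to/creds-v1.zip'})
create_keyspace_simple('test', 1)
sync_table(TestModel)

TestModel.create(id=42, val='test')
assert TestModel.objects.count() == 1
```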
diff --git a/tests/integration/cloud/test_cloud_schema.py b/tests/integration/cloud/test_cloud_schema.py deleted file mode 100644 index 1d52e8e428..0000000000 --- a/tests/integration/cloud/test_cloud_schema.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License -""" -This is mostly copypasta from integration/long/test_schema.py - -TODO: Come up with a way to run cloud and local tests without duplication -""" - -import logging -import time - -from cassandra import ConsistencyLevel -from cassandra.cluster import Cluster -from cassandra.query import SimpleStatement - -from tests.integration import execute_until_pass -from tests.integration.cloud import CloudProxyCluster - -log = logging.getLogger(__name__) - - -class CloudSchemaTests(CloudProxyCluster): - def test_recreates(self): - """ - Basic test for repeated schema creation and use, using many different keyspaces - """ - self.connect(self.creds) - session = self.session - - for _ in self.cluster.metadata.all_hosts(): - for keyspace_number in range(5): - keyspace = "ks_{0}".format(keyspace_number) - - if keyspace in self.cluster.metadata.keyspaces.keys(): - drop = "DROP KEYSPACE {0}".format(keyspace) - log.debug(drop) - execute_until_pass(session, drop) - - create = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 3}}".format( - keyspace) - log.debug(create) - execute_until_pass(session, create) - - create = "CREATE TABLE {0}.cf (k int PRIMARY KEY, i int)".format(keyspace) - log.debug(create) - execute_until_pass(session, create) - - use = "USE {0}".format(keyspace) - log.debug(use) - execute_until_pass(session, use) - - insert = "INSERT INTO cf (k, i) VALUES (0, 0)" - log.debug(insert) - ss = SimpleStatement(insert, consistency_level=ConsistencyLevel.QUORUM) - execute_until_pass(session, ss) - - def test_for_schema_disagreement_attribute(self): - """ - Tests to ensure that schema disagreement is properly surfaced on the response future. - - Creates and destroys keyspaces/tables with various schema agreement timeouts set. - The first part runs CQL create/drop commands with schema agreement set in such a way that it will be impossible for agreement to occur during the timeout. - It then validates that the correct value is set on the result.
- The second part ensures that when schema agreement occurs, the result set reflects that appropriately - - @since 3.1.0 - @jira_ticket PYTHON-458 - @expected_result is_schema_agreed is set appropriately on the response future - - @test_category schema - """ - # This should yield a schema disagreement - cloud_config = {'secure_connect_bundle': self.creds} - cluster = Cluster(max_schema_agreement_wait=0.001, protocol_version=4, cloud=cloud_config) - session = cluster.connect(wait_for_all_pools=True) - - rs = session.execute( - "CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") - self.check_and_wait_for_agreement(session, rs, False) - rs = session.execute( - SimpleStatement("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)", - consistency_level=ConsistencyLevel.ALL)) - self.check_and_wait_for_agreement(session, rs, False) - rs = session.execute("DROP KEYSPACE test_schema_disagreement") - self.check_and_wait_for_agreement(session, rs, False) - cluster.shutdown() - - # These should have schema agreement - cluster = Cluster(protocol_version=4, max_schema_agreement_wait=100, cloud=cloud_config) - session = cluster.connect() - rs = session.execute( - "CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}") - self.check_and_wait_for_agreement(session, rs, True) - rs = session.execute( - SimpleStatement("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)", - consistency_level=ConsistencyLevel.ALL)) - self.check_and_wait_for_agreement(session, rs, True) - rs = session.execute("DROP KEYSPACE test_schema_disagreement") - self.check_and_wait_for_agreement(session, rs, True) - cluster.shutdown() - - def check_and_wait_for_agreement(self, session, rs, expected): - # Wait for RESULT_KIND_SCHEMA_CHANGE message to arrive - time.sleep(1) - self.assertEqual(rs.response_future.is_schema_agreed, expected) - if not rs.response_future.is_schema_agreed: - session.cluster.control_connection.wait_for_schema_agreement(wait_time=1000) \ No newline at end of file diff --git a/tests/integration/cqlengine/advanced/__init__.py b/tests/integration/cqlengine/advanced/__init__.py deleted file mode 100644 index 386372eb4a..0000000000 --- a/tests/integration/cqlengine/advanced/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -
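check_and_wait_for_agreement above is the crux of the deleted schema test: with a near-zero max_schema_agreement_wait, DDL returns before the nodes converge and is_schema_agreed is False. A sketch of that interaction outside the test harness, with an illustrative bundle path:

```python
from cassandra.cluster import Cluster

# An effectively zero agreement window makes DDL return immediately,
# before all nodes report the same schema version.
cluster = Cluster(cloud={'secure_connect_bundle': '/path/to/creds-v1.zip'},
                  protocol_version=4, max_schema_agreement_wait=0.001)
session = cluster.connect()
rs = session.execute(
    "CREATE KEYSPACE demo_agreement WITH replication = "
    "{'class': 'SimpleStrategy', 'replication_factor': 3}")

if not rs.response_future.is_schema_agreed:
    # Block explicitly until agreement, as the deleted test did.
    cluster.control_connection.wait_for_schema_agreement(wait_time=1000)
cluster.shutdown()
```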
diff --git a/tests/integration/cqlengine/advanced/test_cont_paging.py b/tests/integration/cqlengine/advanced/test_cont_paging.py deleted file mode 100644 index 95fb7dc837..0000000000 --- a/tests/integration/cqlengine/advanced/test_cont_paging.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - - -import unittest - -from packaging.version import Version - -from cassandra.cluster import (EXEC_PROFILE_DEFAULT, - ContinuousPagingOptions, ExecutionProfile, - ProtocolVersion) - from cassandra.cqlengine import columns, connection, models -from cassandra.cqlengine.management import drop_table, sync_table -from tests.integration import (DSE_VERSION, greaterthanorequaldse51, - greaterthanorequaldse60, requiredse, TestCluster) - - -class TestMultiKeyModel(models.Model): - __test__ = False - - partition = columns.Integer(primary_key=True) - cluster = columns.Integer(primary_key=True) - count = columns.Integer(required=False) - text = columns.Text(required=False) - - -def setup_module(): - if DSE_VERSION: - sync_table(TestMultiKeyModel) - for i in range(1000): - TestMultiKeyModel.create(partition=i, cluster=i, count=5, text="text to write") - - -def teardown_module(): - if DSE_VERSION: - drop_table(TestMultiKeyModel) - - -@requiredse -class BasicConcurrentTests(): - required_dse_version = None - protocol_version = None - connections = set() - sane_connections = {"CONTDEFAULT"} - - @classmethod - def setUpClass(cls): - if DSE_VERSION: - cls._create_cluster_with_cp_options("CONTDEFAULT", ContinuousPagingOptions()) - cls._create_cluster_with_cp_options("ONEPAGE", ContinuousPagingOptions(max_pages=1)) - cls._create_cluster_with_cp_options("MANYPAGES", ContinuousPagingOptions(max_pages=10)) - cls._create_cluster_with_cp_options("SLOW", ContinuousPagingOptions(max_pages_per_second=1)) - - @classmethod - def tearDownClass(cls): - if not DSE_VERSION or DSE_VERSION < cls.required_dse_version: - return - - cls.cluster_default.shutdown() - connection.set_default_connection("default") - - @classmethod - def _create_cluster_with_cp_options(cls, name, cp_options): - execution_profiles = {EXEC_PROFILE_DEFAULT: - ExecutionProfile(continuous_paging_options=cp_options)} - cls.cluster_default = TestCluster(protocol_version=cls.protocol_version, - execution_profiles=execution_profiles) - cls.session_default = cls.cluster_default.connect(wait_for_all_pools=True) - connection.register_connection(name, default=True, session=cls.session_default) - cls.connections.add(name) - - def test_continuous_paging_basic(self): - """ - Test to ensure that continuous paging works with cqlengine for the session - @since DSE 2.4 - @jira_ticket PYTHON-872 - @expected_result various continuous paging options should fetch all the results - - @test_category queries - """ - for connection_name in self.sane_connections: - connection.set_default_connection(connection_name) - row = TestMultiKeyModel.get(partition=0, cluster=0) - self.assertEqual(row.partition, 0) - self.assertEqual(row.cluster, 0) - rows = TestMultiKeyModel.objects().allow_filtering() - self.assertEqual(len(rows), 1000) - - def test_fetch_size(self): - """ - Test to ensure that continuous paging works with different fetch sizes for the session - @since DSE 2.4 - @jira_ticket PYTHON-872 - @expected_result various continuous paging options should fetch all the results - - @test_category queries - """ - for connection_name in self.connections: - conn = 
connection._connections[connection_name] - initial_default = conn.session.default_fetch_size - self.addCleanup( - setattr, - conn.session, - "default_fetch_size", - initial_default - ) - - connection.set_default_connection("ONEPAGE") - for fetch_size in (2, 3, 7, 10, 99, 100, 101, 150): - connection._connections["ONEPAGE"].session.default_fetch_size = fetch_size - rows = TestMultiKeyModel.objects().allow_filtering() - self.assertEqual(fetch_size, len(rows)) - - connection.set_default_connection("MANYPAGES") - for fetch_size in (2, 3, 7, 10, 15): - connection._connections["MANYPAGES"].session.default_fetch_size = fetch_size - rows = TestMultiKeyModel.objects().allow_filtering() - self.assertEqual(fetch_size * 10, len(rows)) - - for connection_name in self.sane_connections: - connection.set_default_connection(connection_name) - for fetch_size in (2, 3, 7, 10, 99, 100, 101, 150): - connection._connections[connection_name].session.default_fetch_size = fetch_size - rows = TestMultiKeyModel.objects().allow_filtering() - self.assertEqual(1000, len(rows)) - - -@requiredse -@greaterthanorequaldse51 -class ContPagingTestsDSEV1(BasicConcurrentTests, unittest.TestCase): - @classmethod - def setUpClass(cls): - BasicConcurrentTests.required_dse_version = Version('5.1') - if not DSE_VERSION or DSE_VERSION < BasicConcurrentTests.required_dse_version: - return - - BasicConcurrentTests.protocol_version = ProtocolVersion.DSE_V1 - BasicConcurrentTests.setUpClass() - -@requiredse -@greaterthanorequaldse60 -class ContPagingTestsDSEV2(BasicConcurrentTests, unittest.TestCase): - @classmethod - def setUpClass(cls): - BasicConcurrentTests.required_dse_version = Version('6.0') - if not DSE_VERSION or DSE_VERSION < BasicConcurrentTests.required_dse_version: - return - BasicConcurrentTests.protocol_version = ProtocolVersion.DSE_V2 - BasicConcurrentTests.setUpClass() - - cls.connections = cls.connections.union({"SMALL_QUEUE", "BIG_QUEUE"}) - cls.sane_connections = cls.sane_connections.union({"SMALL_QUEUE", "BIG_QUEUE"}) - - cls._create_cluster_with_cp_options("SMALL_QUEUE", ContinuousPagingOptions(max_queue_size=2)) - cls._create_cluster_with_cp_options("BIG_QUEUE", ContinuousPagingOptions(max_queue_size=400)) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index ab5ea9f901..55fb62f22c 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -23,7 +23,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -from tests.integration import DSE_VERSION, PROTOCOL_VERSION, greaterthancass20, requires_collection_indexes, \ +from tests.integration import PROTOCOL_VERSION, greaterthancass20, requires_collection_indexes, \ MockLoggingHandler, CASSANDRA_VERSION, SCYLLA_VERSION, xfail_scylla from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import TestModel diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index 7848a21b1d..a6dff4d786 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -30,7 +30,7 @@ ) from cassandra.query import SimpleStatement -from tests.integration import use_singledc, use_multidc, remove_cluster, TestCluster, greaterthanorequalcass40, notdse +from tests.integration import use_singledc, use_multidc, 
remove_cluster, TestCluster, greaterthanorequalcass40 from tests.integration.long.utils import (wait_for_up, create_schema, CoordinatorStats, force_stop, wait_for_down, decommission, start, @@ -614,7 +614,6 @@ def test_token_aware_with_shuffle_rf3(self): self.coordinator_stats.assert_query_count_equals(self, 2, 0) self.coordinator_stats.assert_query_count_equals(self, 3, 12) - @notdse @greaterthanorequalcass40 def test_token_aware_with_transient_replication(self): """ diff --git a/tests/integration/long/utils.py b/tests/integration/long/utils.py index 58c3241a42..a3ae705a34 100644 --- a/tests/integration/long/utils.py +++ b/tests/integration/long/utils.py @@ -19,7 +19,7 @@ from packaging.version import Version from tests.integration import (get_node, get_cluster, wait_for_node_socket, - DSE_VERSION, CASSANDRA_VERSION) + CASSANDRA_VERSION) IP_FORMAT = '127.0.0.%s' @@ -92,7 +92,7 @@ def force_stop(node): def decommission(node): - if (DSE_VERSION and DSE_VERSION >= Version("5.1")) or CASSANDRA_VERSION >= Version("4.0-a"): + if CASSANDRA_VERSION >= Version("4.0-a"): # CASSANDRA-12510 get_node(node).decommission(force=True) else: diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index c959fd6e08..b75b67c540 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -13,7 +13,7 @@ # limitations under the License import unittest -from tests.integration import requiredse, CASSANDRA_VERSION, DSE_VERSION, SIMULACRON_JAR, PROTOCOL_VERSION +from tests.integration import CASSANDRA_VERSION, SIMULACRON_JAR, PROTOCOL_VERSION from tests.integration.simulacron.utils import ( clear_queries, start_and_prime_singledc, @@ -26,7 +26,7 @@ from packaging.version import Version -PROTOCOL_VERSION = min(4, PROTOCOL_VERSION if (DSE_VERSION is None or DSE_VERSION >= Version('5.0')) else 3) +PROTOCOL_VERSION = min(4, PROTOCOL_VERSION) def teardown_package(): @@ -61,22 +61,3 @@ def tearDownClass(cls): if cls.cluster: cls.cluster.shutdown() stop_simulacron() - - -@requiredse -class DseSimulacronCluster(SimulacronBase): - - simulacron_cluster = None - cluster, connect = None, True - nodes_per_dc = 1 - - @classmethod - def setUpClass(cls): - if DSE_VERSION is None and SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"): - return - - cls.simulacron_cluster = start_and_prime_cluster_defaults(dse_version=DSE_VERSION, - nodes_per_dc=cls.nodes_per_dc) - if cls.connect: - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) - cls.session = cls.cluster.connect(wait_for_all_pools=True) diff --git a/tests/integration/simulacron/advanced/__init__.py b/tests/integration/simulacron/advanced/__init__.py deleted file mode 100644 index 2c9ca172f8..0000000000 --- a/tests/integration/simulacron/advanced/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
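The simulacron hunk above collapses the DSE-aware protocol cap to a flat min(4, PROTOCOL_VERSION). For reference, the old and new behavior side by side; the helper name is hypothetical, but the logic is lifted directly from the removed expression:

```python
from packaging.version import Version


def capped_protocol_version(protocol_version, dse_version=None):
    # Before this change: DSE older than 5.0 forced protocol v3;
    # everything else was capped at v4 for the simulacron tests.
    if dse_version is not None and dse_version < Version('5.0'):
        return 3
    return min(4, protocol_version)


# After the change only the cap remains: min(4, PROTOCOL_VERSION).
assert capped_protocol_version(5) == 4
assert capped_protocol_version(3) == 3
assert capped_protocol_version(5, Version('4.8')) == 3  # removed DSE branch
```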
diff --git a/tests/integration/simulacron/advanced/test_insights.py b/tests/integration/simulacron/advanced/test_insights.py deleted file mode 100644 index 5ddae4ec7c..0000000000 --- a/tests/integration/simulacron/advanced/test_insights.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import unittest - -import time -import json -import re - -from cassandra.cluster import Cluster -from cassandra.datastax.insights.util import version_supports_insights - -from tests.integration import requiressimulacron, requiredse, DSE_VERSION -from tests.integration.simulacron import DseSimulacronCluster, PROTOCOL_VERSION -from tests.integration.simulacron.utils import SimulacronClient, GetLogsQuery, ClearLogsQuery - - -@requiredse -@requiressimulacron -@unittest.skipUnless(DSE_VERSION and version_supports_insights(str(DSE_VERSION)), 'DSE {} does not support insights'.format(DSE_VERSION)) -class InsightsTests(DseSimulacronCluster): - """ - Tests insights integration - - @since 3.18 - @jira_ticket PYTHON-1047 - @expected_result startup and status messages are sent - """ - - connect = False - - def tearDown(self): - if self.cluster: - self.cluster.shutdown() - - @staticmethod - def _get_node_logs(raw_data): - return list(filter(lambda q: q['type'] == 'QUERY' and q['query'].startswith('CALL InsightsRpc.reportInsight'), - json.loads(raw_data)['data_centers'][0]['nodes'][0]['queries'])) - - @staticmethod - def _parse_data(data, index=0): - return json.loads(re.match( - r"CALL InsightsRpc.reportInsight\('(.+)'\)", - data[index]['frame']['message']['query']).group(1)) - - def test_startup_message(self): - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) - self.session = self.cluster.connect(wait_for_all_pools=True) - - time.sleep(1) # wait the monitor thread is started - response = SimulacronClient().submit_request(GetLogsQuery()) - self.assertTrue('CALL InsightsRpc.reportInsight' in response) - - node_queries = self._get_node_logs(response) - self.assertEqual(1, len(node_queries)) - self.assertTrue(node_queries, "RPC query not found") - - message = self._parse_data(node_queries) - - self.assertEqual(message['metadata']['name'], 'driver.startup') - self.assertEqual(message['data']['initialControlConnection'], - self.cluster.control_connection._connection.host) - self.assertEqual(message['data']['sessionId'], str(self.session.session_id)) - self.assertEqual(message['data']['clientId'], str(self.cluster.client_id)) - self.assertEqual(message['data']['compression'], 'NONE') - - def test_status_message(self): - SimulacronClient().submit_request(ClearLogsQuery()) - - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, monitor_reporting_interval=1) - self.session = self.cluster.connect(wait_for_all_pools=True) - - time.sleep(1.1) - response = SimulacronClient().submit_request(GetLogsQuery()) - self.assertTrue('CALL InsightsRpc.reportInsight' in response) - - node_queries = self._get_node_logs(response) - 
self.assertEqual(2, len(node_queries)) - self.assertTrue(node_queries, "RPC query not found") - - message = self._parse_data(node_queries, 1) - - self.assertEqual(message['metadata']['name'], 'driver.status') - self.assertEqual(message['data']['controlConnection'], - self.cluster.control_connection._connection.host) - self.assertEqual(message['data']['sessionId'], str(self.session.session_id)) - self.assertEqual(message['data']['clientId'], str(self.cluster.client_id)) - self.assertEqual(message['metadata']['insightType'], 'EVENT') - - def test_monitor_disabled(self): - SimulacronClient().submit_request(ClearLogsQuery()) - - self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False, monitor_reporting_enabled=False) - self.session = self.cluster.connect(wait_for_all_pools=True) - - response = SimulacronClient().submit_request(GetLogsQuery()) - self.assertFalse('CALL InsightsRpc.reportInsight' in response) diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index dfbf6c0ec6..53aa9936fc 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -18,7 +18,7 @@ import cassandra from tests.integration.simulacron import SimulacronCluster, SimulacronBase -from tests.integration import (requiressimulacron, PROTOCOL_VERSION, DSE_VERSION, MockLoggingHandler) +from tests.integration import (requiressimulacron, PROTOCOL_VERSION, MockLoggingHandler) from tests.integration.simulacron.utils import prime_query, start_and_prime_singledc from cassandra import (WriteTimeout, WriteType, @@ -26,7 +26,7 @@ from cassandra.cluster import Cluster, ControlConnection -PROTOCOL_VERSION = min(4, PROTOCOL_VERSION if (DSE_VERSION is None or DSE_VERSION >= Version('5.0')) else 3) +PROTOCOL_VERSION = min(4, PROTOCOL_VERSION) @requiressimulacron class ClusterTests(SimulacronCluster): @@ -89,7 +89,7 @@ class DuplicateRpcTest(SimulacronCluster): def test_duplicate(self): with MockLoggingHandler().set_module_name(cassandra.cluster.__name__) as mock_handler: - address_column = "native_transport_address" if DSE_VERSION and DSE_VERSION > Version("6.0") else "rpc_address" + address_column = "rpc_address" rows = [ {"peer": "127.0.0.1", "data_center": "dc", "host_id": "dontcare1", "rack": "rack1", "release_version": "3.11.4", address_column: "127.0.0.1", "schema_version": "dontcare", "tokens": "1"}, diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index 37e259dfd7..b6136e247a 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -20,7 +20,7 @@ from cassandra.metadata import SchemaParserV4, SchemaParserDSE68 from tests.util import wait_until_not_raised -from tests.integration import CASSANDRA_VERSION, SIMULACRON_JAR, DSE_VERSION +from tests.integration import CASSANDRA_VERSION, SIMULACRON_JAR DEFAULT_CLUSTER = "python_simulacron_cluster" @@ -122,12 +122,6 @@ def prime_server_versions(self): system_local_row = {} system_local_row["cql_version"] = CASSANDRA_VERSION.base_version system_local_row["release_version"] = CASSANDRA_VERSION.base_version + "-SNAPSHOT" - if DSE_VERSION: - system_local_row["dse_version"] = DSE_VERSION.base_version - column_types = {"cql_version": "ascii", "release_version": "ascii"} - system_local = PrimeQuery("SELECT cql_version, release_version FROM system.local WHERE key='local'", - rows=[system_local_row], - column_types=column_types) self.submit_request(system_local) diff --git 
a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 9c01fc00a9..503b9304b3 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -42,7 +42,7 @@ from tests.integration import use_cluster, get_server_versions, CASSANDRA_VERSION, \ execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ get_unsupported_upper_protocol, protocolv6, local, CASSANDRA_IP, greaterthanorequalcass30, \ - lessthanorequalcass40, DSE_VERSION, TestCluster, PROTOCOL_VERSION, xfail_scylla, incorrect_test + lessthanorequalcass40, TestCluster, PROTOCOL_VERSION, xfail_scylla, incorrect_test from tests.integration.util import assert_quiescent_pool_state import sys @@ -255,13 +255,7 @@ def test_protocol_negotiation(self): updated_protocol_version = session._protocol_version updated_cluster_version = cluster.protocol_version # Make sure the correct protocol was selected by default - if DSE_VERSION and DSE_VERSION >= Version("6.0"): - self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.DSE_V2) - self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.DSE_V2) - elif DSE_VERSION and DSE_VERSION >= Version("5.1"): - self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.DSE_V1) - self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.DSE_V1) - elif CASSANDRA_VERSION >= Version('4.0-beta5'): + if CASSANDRA_VERSION >= Version('4.0-beta5'): self.assertEqual(updated_protocol_version, cassandra.ProtocolVersion.V5) self.assertEqual(updated_cluster_version, cassandra.ProtocolVersion.V5) elif CASSANDRA_VERSION >= Version('4.0-a'): diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index b6e0d3ccd3..ea434c37c5 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -20,7 +20,7 @@ from cassandra.protocol import ConfigurationException -from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster, greaterthanorequalcass40, notdse +from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster, greaterthanorequalcass40 from tests.integration.datatype_utils import update_datatypes @@ -102,7 +102,6 @@ def test_get_control_connection_host(self): # TODO: enable after https://github.com/scylladb/python-driver/issues/121 is fixed @unittest.skip('Fails on scylla due to the broadcast_rpc_port is None') - @notdse @greaterthanorequalcass40 def test_control_connection_port_discovery(self): """ diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 35dba6c1b5..26d3f5fe35 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -21,8 +21,8 @@ from cassandra import ProtocolVersion, ConsistencyLevel from tests.integration import use_singledc, drop_keyspace_shutdown_cluster, \ - greaterthanorequalcass30, execute_with_long_wait_retry, greaterthanorequaldse51, greaterthanorequalcass3_10, \ - TestCluster, greaterthanorequalcass40, requirecassandra + greaterthanorequalcass30, execute_with_long_wait_retry, greaterthanorequalcass3_10, \ + TestCluster, greaterthanorequalcass40 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import 
create_table_with_all_types, get_all_primitive_params @@ -121,7 +121,6 @@ def test_custom_raw_row_results_all_types(self): cluster.shutdown() @unittest.expectedFailure - @requirecassandra @greaterthanorequalcass40 def test_protocol_divergence_v5_fail_by_continuous_paging(self): """ @@ -169,7 +168,6 @@ def test_protocol_divergence_v4_fail_by_flag_uses_int(self): int_flag=True) @unittest.expectedFailure - @requirecassandra @greaterthanorequalcass40 def test_protocol_v5_uses_flag_int(self): """ @@ -183,21 +181,7 @@ def test_protocol_v5_uses_flag_int(self): self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.V5, uses_int_query_flag=True, beta=True, int_flag=True) - @greaterthanorequaldse51 - def test_protocol_dsev1_uses_flag_int(self): - """ - Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for DSE_V1 - - @jira_ticket PYTHON-694 - @expected_result the fetch_size=1 parameter will be honored - - @test_category connection - """ - self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.DSE_V1, uses_int_query_flag=True, - int_flag=True) - @unittest.expectedFailure - @requirecassandra @greaterthanorequalcass40 def test_protocol_divergence_v5_fail_by_flag_uses_int(self): """ @@ -211,19 +195,6 @@ def test_protocol_divergence_v5_fail_by_flag_uses_int(self): self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.V5, uses_int_query_flag=False, beta=True, int_flag=False) - @greaterthanorequaldse51 - def test_protocol_divergence_dsev1_fail_by_flag_uses_int(self): - """ - Test to validate that the _PAGE_SIZE_FLAG is treated correctly using write_uint for DSE_V1 - - @jira_ticket PYTHON-694 - @expected_result the fetch_size=1 parameter will be honored - - @test_category connection - """ - self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.DSE_V1, uses_int_query_flag=False, - int_flag=False) - def _send_query_message(self, session, timeout, **kwargs): query = "SELECT * FROM test3rf.test" message = QueryMessage(query=query, **kwargs) diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py deleted file mode 100644 index 7b96094b3f..0000000000 --- a/tests/integration/standard/test_dse.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -from packaging.version import Version - -from tests import notwindows -from tests.unit.cython.utils import notcython -from tests.integration import (execute_until_pass, - execute_with_long_wait_retry, use_cluster, TestCluster) - -import unittest - - -CCM_IS_DSE = (os.environ.get('CCM_IS_DSE', None) == 'true') - - -@unittest.skipIf(os.environ.get('CCM_ARGS', None), 'environment has custom CCM_ARGS; skipping') -@notwindows -@notcython # no need to double up on this test; also __default__ setting doesn't work -class DseCCMClusterTest(unittest.TestCase): - """ - This class can be executed setting the DSE_VERSION variable, for example: - DSE_VERSION=5.1.4 python2.7 -m nose tests/integration/standard/test_dse.py - If CASSANDRA_VERSION is set instead, it will be converted to the corresponding DSE_VERSION - """ - - def test_dse_5x(self): - self._test_basic(Version('5.1.10')) - - def test_dse_60(self): - self._test_basic(Version('6.0.2')) - - @unittest.skipUnless(CCM_IS_DSE, 'DSE version unavailable') - def test_dse_67(self): - self._test_basic(Version('6.7.0')) - - def _test_basic(self, dse_version): - """ - Test basic connection and usage - """ - cluster_name = '{}-{}'.format( - self.__class__.__name__, dse_version.base_version.replace('.', '_') - ) - use_cluster(cluster_name=cluster_name, nodes=[3], dse_options={}) - - cluster = TestCluster() - session = cluster.connect() - result = execute_until_pass( - session, - """ - CREATE KEYSPACE clustertests - WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} - """) - self.assertFalse(result) - - result = execute_with_long_wait_retry( - session, - """ - CREATE TABLE clustertests.cf0 ( - a text, - b text, - c text, - PRIMARY KEY (a, b) - ) - """) - self.assertFalse(result) - - result = session.execute( - """ - INSERT INTO clustertests.cf0 (a, b, c) VALUES ('a', 'b', 'c') - """) - self.assertFalse(result) - - result = session.execute("SELECT * FROM clustertests.cf0") - self.assertEqual([('a', 'b', 'c')], result) - - execute_with_long_wait_retry(session, "DROP KEYSPACE clustertests") - - cluster.shutdown() diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c76ffa22e9..8d677030f9 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -37,11 +37,11 @@ from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, execute_until_pass, BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase, BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION, - greaterthanorequaldse51, greaterthanorequalcass30, lessthancass30, local, + greaterthanorequalcass30, lessthancass30, local, get_supported_protocol_versions, greaterthancass20, greaterthancass21, assert_startswith, greaterthanorequalcass40, - greaterthanorequaldse67, lessthancass40, - TestCluster, DSE_VERSION, requires_java_udf, requires_composite_type, + lessthancass40, + TestCluster, requires_java_udf, requires_composite_type, requires_collection_indexes, SCYLLA_VERSION, xfail_scylla, xfail_scylla_version_lt) from tests.util import wait_until @@ -74,7 +74,7 @@ def test_host_addresses(self): self.assertIsNotNone(host.broadcast_rpc_address) self.assertIsNotNone(host.host_id) - if not DSE_VERSION and CASSANDRA_VERSION >= Version('4-a'): + if CASSANDRA_VERSION >= Version('4-a'): self.assertIsNotNone(host.broadcast_port) self.assertIsNotNone(host.broadcast_rpc_port) @@ -1066,7 +1066,7 @@ def 
test_metadata_pagination_keyspaces(self): test for covering https://github.com/scylladb/python-driver/issues/174 """ - + self.cluster.refresh_schema_metadata() keyspaces = [f"keyspace{idx}" for idx in range(15)] @@ -2509,21 +2509,6 @@ def test_metadata_with_quoted_identifiers(self): self.assertIsNotNone(value_column) self.assertEqual(value_column.name, 'the Value') - @greaterthanorequaldse51 - def test_dse_workloads(self): - """ - Test to ensure dse_workloads is populated appropriately. - Field added in DSE 5.1 - - @jira_ticket PYTHON-667 - @expected_result the dse_workloads set is set on the host model - - @test_category metadata - """ - for host in self.cluster.metadata.all_hosts(): - self.assertIsInstance(host.dse_workloads, SortedSet) - self.assertIn("Cassandra", host.dse_workloads) - class GroupPerHost(BasicSharedKeyspaceUnitTestCase): @classmethod @@ -2588,31 +2573,3 @@ def test_existing_keyspaces_have_correct_virtual_tags(self): ks.virtual, 'incorrect .virtual value for {}'.format(name) ) - - @greaterthanorequalcass40 - @greaterthanorequaldse67 - def test_expected_keyspaces_exist_and_are_virtual(self): - for name in self.virtual_ks_names: - self.assertTrue( - self.cluster.metadata.keyspaces[name].virtual, - 'incorrect .virtual value for {}'.format(name) - ) - - @greaterthanorequalcass40 - @greaterthanorequaldse67 - def test_virtual_keyspaces_have_expected_schema_structure(self): - self.maxDiff = None - - ingested_virtual_ks_structure = defaultdict(dict) - for ks_name, ks in self.cluster.metadata.keyspaces.items(): - if not ks.virtual: - continue - for tab_name, tab in ks.tables.items(): - ingested_virtual_ks_structure[ks_name][tab_name] = set( - tab.columns.keys() - ) - - # Identify a couple known values to verify we parsed the structure correctly - self.assertIn('table_name', ingested_virtual_ks_structure['system_virtual_schema']['tables']) - self.assertIn('type', ingested_virtual_ks_structure['system_virtual_schema']['columns']) - self.assertIn('total', ingested_virtual_ks_structure['system_views']['sstable_tasks']) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 5ccc0732fa..d413a4dc95 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -23,8 +23,8 @@ from cassandra import ConsistencyLevel, ProtocolVersion from cassandra.query import PreparedStatement, UNSET_VALUE -from tests.integration import (get_server_versions, greaterthanorequalcass40, greaterthanorequaldse50, - requirecassandra, BasicSharedKeyspaceUnitTestCase) +from tests.integration import (get_server_versions, greaterthanorequalcass40, + BasicSharedKeyspaceUnitTestCase) import logging @@ -563,7 +563,6 @@ def test_id_is_not_updated_conditional_v4(self): self.addCleanup(cluster.shutdown) self._test_updated_conditional(session, 9) - @requirecassandra def test_id_is_not_updated_conditional_v5(self): """ Test that verifies that the result_metadata and the @@ -577,36 +576,6 @@ def test_id_is_not_updated_conditional_v5(self): self.addCleanup(cluster.shutdown) self._test_updated_conditional(session, 10) - @greaterthanorequaldse50 - def test_id_is_not_updated_conditional_dsev1(self): - """ - Test that verifies that the result_metadata and the - result_metadata_id are updated correctly in conditional statements - in protocol DSE V1 - - @since 3.13 - @jira_ticket PYTHON-847 - """ - cluster = TestCluster(protocol_version=ProtocolVersion.DSE_V1) - session = cluster.connect() - self.addCleanup(cluster.shutdown) - self._test_updated_conditional(session, 10) - - @greaterthanorequaldse50 - def test_id_is_not_updated_conditional_dsev2(self): - """ - Test that verifies that the result_metadata and the - result_metadata_id are updated correctly in conditional statements - in protocol DSE V2 - - @since 3.13 - @jira_ticket PYTHON-847 - """ - cluster = TestCluster(protocol_version=ProtocolVersion.DSE_V2) - session = cluster.connect() - self.addCleanup(cluster.shutdown) - self._test_updated_conditional(session, 10) - - def _test_updated_conditional(self, session, value): - prepared_statement = session.prepare( - "INSERT INTO {}(a, b, d) VALUES " 
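The dsev1/dsev2 variants deleted above cover the same PYTHON-847 scenario as the surviving v5 test: after a schema change, re-executing a conditional (LWT) prepared statement must pick up the server's new result metadata. A sketch of that scenario against plain Cassandra; the keyspace and table names here are hypothetical:

```python
from cassandra.cluster import Cluster

cluster = Cluster(['127.0.0.1'])
session = cluster.connect('test_ks')  # assumed existing keyspace
session.execute("CREATE TABLE IF NOT EXISTS t (a int PRIMARY KEY, b text)")

prepared = session.prepare("INSERT INTO t (a, b) VALUES (?, ?) IF NOT EXISTS")
session.execute(prepared, (1, 'x'))

# Altering the table changes the result metadata the server returns
# for the conditional response...
session.execute("ALTER TABLE t ADD d int")

# ...and on protocols with a result_metadata_id the driver must
# refresh it transparently on re-execution.
rs = session.execute(prepared, (1, 'y'))
assert rs.was_applied is False  # the row already existed
cluster.shutdown()
```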
diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 5c20c50a1a..a4d1b083bf 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -26,7 +26,7 @@ from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, \ greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \ - USE_CASS_EXTERNAL, greaterthanorequalcass40, DSE_VERSION, TestCluster, requirecassandra, xfail_scylla + USE_CASS_EXTERNAL, greaterthanorequalcass40, TestCluster, xfail_scylla from tests import notwindows from tests.integration import greaterthanorequalcass30, get_node @@ -1403,7 +1403,6 @@ def test_setting_keyspace(self): """ self._check_set_keyspace_in_statement(self.session) - @requirecassandra @greaterthanorequalcass40 def test_setting_keyspace_and_session(self): """ @@ -1572,7 +1571,7 @@ def test_reprepare_after_host_is_down(self): # We wait for cluster._prepare_all_queries to be called time.sleep(5) self.assertEqual(1, mock_handler.get_message_count('debug', 'Preparing all known prepared statements')) - + results = self.session.execute(prepared_statement, (1,), execution_profile="only_first") self.assertEqual(results.one(), (1, )) diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index e836b5f428..681e992477 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -19,12 +19,12 @@ from packaging.version import Version from tests.integration import use_singledc, PROTOCOL_VERSION, \ - remove_cluster, greaterthanorequalcass40, notdse, \ - CASSANDRA_VERSION, DSE_VERSION, TestCluster, DEFAULT_SINGLE_INTERFACE_PORT + remove_cluster, greaterthanorequalcass40, \ + CASSANDRA_VERSION, TestCluster, DEFAULT_SINGLE_INTERFACE_PORT def setup_module(): - if not DSE_VERSION and CASSANDRA_VERSION >= Version('4-a'): + if CASSANDRA_VERSION >= Version('4-a'): remove_cluster() use_singledc(use_single_interface=True) @@ -32,7 +32,6 @@ def teardown_module(): remove_cluster() -@notdse @greaterthanorequalcass40 class SingleInterfaceTest(unittest.TestCase): diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 3d0dc0ed7c..eb50c7780a 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -38,8 +38,8 @@ from tests.unit.cython.utils import cythontest from tests.integration import use_singledc, execute_until_pass, notprotocolv1, \ - BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, greaterthanorequaldse51, \ - DSE_VERSION, greaterthanorequalcass3_10, requiredse, TestCluster, 
diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py
index 5c20c50a1a..a4d1b083bf 100644
--- a/tests/integration/standard/test_query.py
+++ b/tests/integration/standard/test_query.py
@@ -26,7 +26,7 @@
 from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy
 from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, \
     greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \
-    USE_CASS_EXTERNAL, greaterthanorequalcass40, DSE_VERSION, TestCluster, requirecassandra, xfail_scylla
+    USE_CASS_EXTERNAL, greaterthanorequalcass40, TestCluster, xfail_scylla
 from tests import notwindows
 from tests.integration import greaterthanorequalcass30, get_node
@@ -1403,7 +1403,6 @@ def test_setting_keyspace(self):
         """
         self._check_set_keyspace_in_statement(self.session)
 
-    @requirecassandra
     @greaterthanorequalcass40
     def test_setting_keyspace_and_session(self):
         """
@@ -1572,7 +1571,7 @@ def test_reprepare_after_host_is_down(self):
         # We wait for cluster._prepare_all_queries to be called
         time.sleep(5)
         self.assertEqual(1, mock_handler.get_message_count('debug', 'Preparing all known prepared statements'))
-
+
         results = self.session.execute(prepared_statement, (1,), execution_profile="only_first")
         self.assertEqual(results.one(), (1, ))
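test_reprepare_after_host_is_down executes through a named profile, "only_first". A rough sketch of how such a profile can be registered; the whitelist address is illustrative, and the suite builds its own profiles elsewhere:

# Sketch: register a named execution profile restricted to a single host.
from cassandra.cluster import Cluster, ExecutionProfile
from cassandra.policies import WhiteListRoundRobinPolicy

only_first = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
cluster = Cluster(execution_profiles={'only_first': only_first})
session = cluster.connect()
rows = session.execute("SELECT key FROM system.local", execution_profile='only_first')
print(rows.one())
cluster.shutdown()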
diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py
index e836b5f428..681e992477 100644
--- a/tests/integration/standard/test_single_interface.py
+++ b/tests/integration/standard/test_single_interface.py
@@ -19,12 +19,12 @@
 from packaging.version import Version
 
 from tests.integration import use_singledc, PROTOCOL_VERSION, \
-    remove_cluster, greaterthanorequalcass40, notdse, \
-    CASSANDRA_VERSION, DSE_VERSION, TestCluster, DEFAULT_SINGLE_INTERFACE_PORT
+    remove_cluster, greaterthanorequalcass40, \
+    CASSANDRA_VERSION, TestCluster, DEFAULT_SINGLE_INTERFACE_PORT
 
 
 def setup_module():
-    if not DSE_VERSION and CASSANDRA_VERSION >= Version('4-a'):
+    if CASSANDRA_VERSION >= Version('4-a'):
         remove_cluster()
         use_singledc(use_single_interface=True)
 
@@ -32,7 +32,6 @@ def teardown_module():
     remove_cluster()
 
 
-@notdse
 @greaterthanorequalcass40
 class SingleInterfaceTest(unittest.TestCase):
 
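The setup_module gate compares against Version('4-a'), which leans on PEP 440 pre-release ordering: alphas sort below betas and final releases of the same version, so every 4.0 build passes the gate while any 3.x release fails it. A self-contained illustration (pure packaging behavior, no driver code):

from packaging.version import Version

# '4-a' parses as the 4.0 alpha pre-release (4a0).
assert Version('4.0-beta5') >= Version('4-a')   # betas sort above alphas
assert Version('4.0.0') >= Version('4-a')       # final releases sort above pre-releases
assert not Version('3.11.4') >= Version('4-a')  # any 3.x stays below the gate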
diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py
index 3d0dc0ed7c..eb50c7780a 100644
--- a/tests/integration/standard/test_types.py
+++ b/tests/integration/standard/test_types.py
@@ -38,8 +38,8 @@
 from tests.unit.cython.utils import cythontest
 
 from tests.integration import use_singledc, execute_until_pass, notprotocolv1, \
-    BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, greaterthanorequaldse51, \
-    DSE_VERSION, greaterthanorequalcass3_10, requiredse, TestCluster, requires_composite_type, greaterthanorequalcass50
+    BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, \
+    greaterthanorequalcass3_10, TestCluster, requires_composite_type, greaterthanorequalcass50
 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, PRIMITIVE_DATATYPES_KEYS, \
     get_sample, get_all_samples, get_collection_sample
@@ -901,372 +901,6 @@ def test_smoke_duration_values(self):
         self.assertRaises(ValueError, self.session.execute, prepared, (1, Duration(int("8FFFFFFFFFFFFFF0", 16), 0, 0)))
 
-
-@requiredse
-class AbstractDateRangeTest():
-
-    def test_single_value_daterange_round_trip(self):
-        self._daterange_round_trip(
-            util.DateRange(
-                value=util.DateRangeBound(
-                    datetime(2014, 10, 1, 0),
-                    util.DateRangePrecision.YEAR
-                )
-            ),
-            util.DateRange(
-                value=util.DateRangeBound(
-                    datetime(2014, 1, 1, 0),
-                    util.DateRangePrecision.YEAR
-                )
-            )
-        )
-
-    def test_open_high_daterange_round_trip(self):
-        self._daterange_round_trip(
-            util.DateRange(
-                lower_bound=util.DateRangeBound(
-                    datetime(2013, 10, 1, 6, 20, 39),
-                    util.DateRangePrecision.SECOND
-                )
-            )
-        )
-
-    def test_open_low_daterange_round_trip(self):
-        self._daterange_round_trip(
-            util.DateRange(
-                upper_bound=util.DateRangeBound(
-                    datetime(2013, 10, 28),
-                    util.DateRangePrecision.DAY
-                )
-            )
-        )
-
-    def test_open_both_daterange_round_trip(self):
-        self._daterange_round_trip(
-            util.DateRange(
-                lower_bound=util.OPEN_BOUND,
-                upper_bound=util.OPEN_BOUND,
-            )
-        )
-
-    def test_closed_daterange_round_trip(self):
-        insert = util.DateRange(
-            lower_bound=util.DateRangeBound(
-                datetime(2015, 3, 1, 10, 15, 30, 1000),
-                util.DateRangePrecision.MILLISECOND
-            ),
-            upper_bound=util.DateRangeBound(
-                datetime(2016, 1, 1, 10, 15, 30, 999000),
-                util.DateRangePrecision.MILLISECOND
-            )
-        )
-        self._daterange_round_trip(insert)
-
-    def test_epoch_value_round_trip(self):
-        insert = util.DateRange(
-            value=util.DateRangeBound(
-                datetime(1970, 1, 1),
-                util.DateRangePrecision.YEAR
-            )
-        )
-        self._daterange_round_trip(insert)
-
-    def test_double_bounded_daterange_round_trip_from_string(self):
-        self._daterange_round_trip(
-            '[2015-03-01T10:15:30.010Z TO 2016-01-01T10:15:30.999Z]',
-            util.DateRange(
-                lower_bound=util.DateRangeBound(
-                    datetime(2015, 3, 1, 10, 15, 30, 10000),
-                    util.DateRangePrecision.MILLISECOND
-                ),
-                upper_bound=util.DateRangeBound(
-                    datetime(2016, 1, 1, 10, 15, 30, 999000),
-                    util.DateRangePrecision.MILLISECOND
-                ),
-            )
-        )
-
-    def test_open_high_daterange_round_trip_from_string(self):
-        self._daterange_round_trip(
-            '[2015-03 TO *]',
-            util.DateRange(
-                lower_bound=util.DateRangeBound(
-                    datetime(2015, 3, 1, 0, 0),
-                    util.DateRangePrecision.MONTH
-                ),
-                upper_bound=util.DateRangeBound(None, None)
-            )
-        )
-
-    def test_open_low_daterange_round_trip_from_string(self):
-        self._daterange_round_trip(
-            '[* TO 2015-03]',
-            util.DateRange(
-                lower_bound=util.DateRangeBound(None, None),
-                upper_bound=util.DateRangeBound(
-                    datetime(2015, 3, 1, 0, 0),
-                    'MONTH'
-                )
-            )
-        )
-
-    def test_no_bounds_daterange_round_trip_from_string(self):
-        self._daterange_round_trip(
-            '[* TO *]',
-            util.DateRange(
-                lower_bound=(None, None),
-                upper_bound=(None, None)
-            )
-        )
-
-    def test_single_no_bounds_daterange_round_trip_from_string(self):
-        self._daterange_round_trip(
-            '*',
-            util.DateRange(
-                value=(None, None)
-            )
-        )
-
-    def test_single_value_daterange_round_trip_from_string(self):
-        self._daterange_round_trip(
-            '2001-01-01T12:30:30.000Z',
-            util.DateRange(
-                value=util.DateRangeBound(
-                    datetime(2001, 1, 1, 12, 30, 30),
-                    'MILLISECOND'
-                )
-            )
-        )
-
-    def test_daterange_with_negative_bound_round_trip_from_string(self):
-        self._daterange_round_trip(
-            '[-1991-01-01T00:00:00.001 TO 1990-02-03]',
-            util.DateRange(
-                lower_bound=(-124997039999999, 'MILLISECOND'),
-                upper_bound=util.DateRangeBound(
-                    datetime(1990, 2, 3, 12, 30, 30),
-                    'DAY'
-                )
-            )
-        )
-
-    def test_epoch_value_round_trip_from_string(self):
-        self._daterange_round_trip(
-            '1970',
-            util.DateRange(
-                value=util.DateRangeBound(
-                    datetime(1970, 1, 1),
-                    util.DateRangePrecision.YEAR
-                )
-            )
-        )
-
-
-@greaterthanorequaldse51
-class TestDateRangePrepared(AbstractDateRangeTest, BasicSharedKeyspaceUnitTestCase):
-    """
-    Tests various inserts and queries using Date-ranges and prepared queries
-
-    @since 2.0.0
-    @jira_ticket PYTHON-668
-    @expected_result Date ranges will be inserted and retrieved succesfully
-
-    @test_category data_types
-    """
-
-    @classmethod
-    def setUpClass(cls):
-        super(TestDateRangePrepared, cls).setUpClass()
-        cls.session.set_keyspace(cls.ks_name)
-        if DSE_VERSION and DSE_VERSION >= Version('5.1'):
-            cls.session.execute("CREATE TABLE tab (dr 'DateRangeType' PRIMARY KEY)")
-
-
-    def _daterange_round_trip(self, to_insert, expected=None):
-        if isinstance(to_insert, util.DateRange):
-            prep = self.session.prepare("INSERT INTO tab (dr) VALUES (?);")
-            self.session.execute(prep, (to_insert,))
-            prep_sel = self.session.prepare("SELECT * FROM tab WHERE dr = ? ")
-            results = self.session.execute(prep_sel, (to_insert,))
-        else:
-            prep = self.session.prepare("INSERT INTO tab (dr) VALUES ('%s');" % (to_insert,))
-            self.session.execute(prep)
-            prep_sel = self.session.prepare("SELECT * FROM tab WHERE dr = '%s' " % (to_insert,))
-            results = self.session.execute(prep_sel)
-
-        dr = results.one().dr
-        # sometimes this is truncated in the assertEqual output on failure;
-        if isinstance(expected, str):
-            self.assertEqual(str(dr), expected)
-        else:
-            self.assertEqual(dr, expected or to_insert)
-
-    # This can only be run as a prepared statement
-    def test_daterange_wide(self):
-        self._daterange_round_trip(
-            util.DateRange(
-                lower_bound=(-9223372036854775808, 'MILLISECOND'),
-                upper_bound=(9223372036854775807, 'MILLISECOND')
-            ),
-            '[-9223372036854775808ms TO 9223372036854775807ms]'
-        )
-    # This can only be run as a prepared statement
-    def test_daterange_with_negative_bound_round_trip_to_string(self):
-        self._daterange_round_trip(
-            util.DateRange(
-                lower_bound=(-124997039999999, 'MILLISECOND'),
-                upper_bound=util.DateRangeBound(
-                    datetime(1990, 2, 3, 12, 30, 30),
-                    'DAY'
-                )
-            ),
-            '[-124997039999999ms TO 1990-02-03]'
-        )
-
-@greaterthanorequaldse51
-class TestDateRangeSimple(AbstractDateRangeTest, BasicSharedKeyspaceUnitTestCase):
-    """
-    Tests various inserts and queries using Date-ranges and simple queries
-
-    @since 2.0.0
-    @jira_ticket PYTHON-668
-    @expected_result DateRanges will be inserted and retrieved successfully
-    @test_category data_types
-    """
-    @classmethod
-    def setUpClass(cls):
-        super(TestDateRangeSimple, cls).setUpClass()
-        cls.session.set_keyspace(cls.ks_name)
-        if DSE_VERSION and DSE_VERSION >= Version('5.1'):
-            cls.session.execute("CREATE TABLE tab (dr 'DateRangeType' PRIMARY KEY)")
-
-
-    def _daterange_round_trip(self, to_insert, expected=None):
-
-        query = "INSERT INTO tab (dr) VALUES ('{0}');".format(to_insert)
-        self.session.execute("INSERT INTO tab (dr) VALUES ('{0}');".format(to_insert))
-        query = "SELECT * FROM tab WHERE dr = '{0}' ".format(to_insert)
-        results= self.session.execute("SELECT * FROM tab WHERE dr = '{0}' ".format(to_insert))
-
-        dr = results.one().dr
-        # sometimes this is truncated in the assertEqual output on failure;
-        if isinstance(expected, str):
-            self.assertEqual(str(dr), expected)
-        else:
-            self.assertEqual(dr, expected or to_insert)
-
-
-@greaterthanorequaldse51
-class TestDateRangeCollection(BasicSharedKeyspaceUnitTestCase):
-
-
-    @classmethod
-    def setUpClass(cls):
-        super(TestDateRangeCollection, cls).setUpClass()
-        cls.session.set_keyspace(cls.ks_name)
-
-    def test_date_range_collection(self):
-        """
-        Tests DateRange type in collections
-
-        @since 2.0.0
-        @jira_ticket PYTHON-668
-        @expected_result DateRanges will be inserted and retrieved successfully when part of a list or map
-        @test_category data_types
-        """
-        self.session.execute("CREATE TABLE dateRangeIntegrationTest5 (k int PRIMARY KEY, l list<'DateRangeType'>, s set<'DateRangeType'>, dr2i map<'DateRangeType', int>, i2dr map<int, 'DateRangeType'>)")
-        self.session.execute("INSERT INTO dateRangeIntegrationTest5 (k, l, s, i2dr, dr2i) VALUES (" +
-                             "1, " +
-                             "['[2000-01-01T10:15:30.001Z TO 2020]', '[2010-01-01T10:15:30.001Z TO 2020]', '2001-01-02'], " +
-                             "{'[2000-01-01T10:15:30.001Z TO 2020]', '[2000-01-01T10:15:30.001Z TO 2020]', '[2010-01-01T10:15:30.001Z TO 2020]'}, " +
-                             "{1: '[2000-01-01T10:15:30.001Z TO 2020]', 2: '[2010-01-01T10:15:30.001Z TO 2020]'}, " +
-                             "{'[2000-01-01T10:15:30.001Z TO 2020]': 1, '[2010-01-01T10:15:30.001Z TO 2020]': 2})")
-        results = self.session.execute("SELECT * FROM dateRangeIntegrationTest5").all()
-        self.assertEqual(len(results),1)
-
-        lower_bound_1 = util.DateRangeBound(datetime(2000, 1, 1, 10, 15, 30, 1000), 'MILLISECOND')
-
-        lower_bound_2 = util.DateRangeBound(datetime(2010, 1, 1, 10, 15, 30, 1000), 'MILLISECOND')
-
-        upper_bound_1 = util.DateRangeBound(datetime(2020, 1, 1), 'YEAR')
-
-        value_1 = util.DateRangeBound(datetime(2001, 1, 2), 'DAY')
-
-        dt = util.DateRange(lower_bound=lower_bound_1, upper_bound=upper_bound_1)
-        dt2 = util.DateRange(lower_bound=lower_bound_2, upper_bound=upper_bound_1)
-        dt3 = util.DateRange(value=value_1)
-
-
-
-        list_result = results[0].l
-        self.assertEqual(3, len(list_result))
-        self.assertEqual(list_result[0],dt)
-        self.assertEqual(list_result[1],dt2)
-        self.assertEqual(list_result[2],dt3)
-
-        set_result = results[0].s
-        self.assertEqual(len(set_result), 2)
-        self.assertIn(dt, set_result)
-        self.assertIn(dt2, set_result)
-
-        d2i = results[0].dr2i
-        self.assertEqual(len(d2i), 2)
-        self.assertEqual(d2i[dt],1)
-        self.assertEqual(d2i[dt2],2)
-
-        i2r = results[0].i2dr
-        self.assertEqual(len(i2r), 2)
-        self.assertEqual(i2r[1],dt)
-        self.assertEqual(i2r[2],dt2)
-
-    def test_allow_date_range_in_udt_tuple(self):
-        """
-        Tests DateRanges in tuples and udts
-
-        @since 2.0.0
-        @jira_ticket PYTHON-668
-        @expected_result DateRanges will be inserted and retrieved successfully in udt's and tuples
-        @test_category data_types
-        """
-        self.session.execute("CREATE TYPE IF NOT EXISTS test_udt (i int, range 'DateRangeType')")
-        self.session.execute("CREATE TABLE dateRangeIntegrationTest4 (k int PRIMARY KEY, u test_udt, uf frozen<test_udt>, t tuple<'DateRangeType', int>, tf frozen<tuple<'DateRangeType', int>>)")
-        self.session.execute("INSERT INTO dateRangeIntegrationTest4 (k, u, uf, t, tf) VALUES (" +
-                             "1, " +
-                             "{i: 10, range: '[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]'}, " +
-                             "{i: 20, range: '[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]'}, " +
-                             "('[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]', 30), " +
-                             "('[2000-01-01T10:15:30.003Z TO 2020-01-01T10:15:30.001Z]', 40))")
-
-        lower_bound = util.DateRangeBound(
-            datetime(2000, 1, 1, 10, 15, 30, 3000),
-            'MILLISECOND')
-
-        upper_bound = util.DateRangeBound(
-            datetime(2020, 1, 1, 10, 15, 30, 1000),
-            'MILLISECOND')
-
-        expected_dt = util.DateRange(lower_bound=lower_bound ,upper_bound=upper_bound)
-
-        results_list = list(self.session.execute("SELECT * FROM dateRangeIntegrationTest4"))
-        self.assertEqual(len(results_list), 1)
-        udt = results_list[0].u
-        self.assertEqual(udt.range, expected_dt)
-        self.assertEqual(udt.i, 10)
-
-
-        uf = results_list[0].uf
-        self.assertEqual(uf.range, expected_dt)
-        self.assertEqual(uf.i, 20)
-
-        t = results_list[0].t
-        self.assertEqual(t[0], expected_dt)
-        self.assertEqual(t[1], 30)
-
-        tf = results_list[0].tf
-        self.assertEqual(tf[0], expected_dt)
-        self.assertEqual(tf[1], 40)
-
-
 class TypeTestsProtocol(BasicSharedKeyspaceUnitTestCase):
 
     @greaterthancass21
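The removed classes round-trip the driver's date-range types, which live in cassandra.util in the upstream driver. A minimal sketch of constructing one, with illustrative values; the printed form follows the removed fixtures:

from datetime import datetime
from cassandra import util

# Half-open range: from March 2015, at month precision, to an open upper bound.
march_2015 = util.DateRangeBound(datetime(2015, 3, 1), util.DateRangePrecision.MONTH)
open_high = util.DateRange(lower_bound=march_2015, upper_bound=util.OPEN_BOUND)
print(open_high)  # expected '[2015-03 TO *]', per the round-trip fixtures above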
diff --git a/tests/unit/advanced/test_auth.py b/tests/unit/advanced/test_auth.py
deleted file mode 100644
index 840073e9e1..0000000000
--- a/tests/unit/advanced/test_auth.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright DataStax, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-from puresasl import QOP
-
-import unittest
-
-from cassandra.auth import DSEGSSAPIAuthProvider
-
-# Cannot import requiredse from tests.integration
-# This auth provider requires kerberos and puresals
-DSE_VERSION = os.getenv('DSE_VERSION', None)
-
-@unittest.skipUnless(DSE_VERSION, "DSE required")
-class TestGSSAPI(unittest.TestCase):
-
-    def test_host_resolution(self):
-        # resolves by default
-        provider = DSEGSSAPIAuthProvider(service='test', qops=QOP.all)
-        authenticator = provider.new_authenticator('127.0.0.1')
-        self.assertEqual(authenticator.sasl.host, 'localhost')
-
-        # numeric fallback okay
-        authenticator = provider.new_authenticator('192.0.2.1')
-        self.assertEqual(authenticator.sasl.host, '192.0.2.1')
-
-        # disable explicitly
-        provider = DSEGSSAPIAuthProvider(service='test', qops=QOP.all, resolve_host_name=False)
-        authenticator = provider.new_authenticator('127.0.0.1')
-        self.assertEqual(authenticator.sasl.host, '127.0.0.1')
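The deleted unit test asserted that the GSSAPI provider reverse-resolves contact points, with a numeric fallback and an explicit opt-out. A standalone approximation of that resolution pattern using only the standard library (resolve_peer is a hypothetical helper, not driver API):

import socket

def resolve_peer(ip, resolve_host_name=True):
    # Mirror the tested behavior: reverse DNS by default, numeric fallback,
    # and an explicit opt-out.
    if not resolve_host_name:
        return ip
    try:
        return socket.getnameinfo((ip, 0), socket.NI_NAMEREQD)[0]
    except socket.gaierror:
        return ip  # e.g. '192.0.2.1' stays numeric when no PTR record exists

print(resolve_peer('127.0.0.1'))                           # typically 'localhost'
print(resolve_peer('127.0.0.1', resolve_host_name=False))  # '127.0.0.1'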