 import urllib3
 import yaml

-from elasticsearch import ApiError, Elasticsearch, ElasticsearchWarning, RequestError
+from elasticsearch import ApiError, ElasticsearchWarning, RequestError
 from elasticsearch._sync.client.utils import _base64_auth_header
 from elasticsearch.compat import string_types

-from ..utils import CA_CERTS, es_url, parse_version
-
 # some params had to be changed in python, keep track of them so we can rename
 # those in the tests accordingly
 PARAMS_RENAMES = {"from": "from_"}
@@ -70,66 +68,37 @@
 }

 # broken YAML tests on some releases
-SKIP_TESTS = {
-    # Warning about date_histogram.interval deprecation is raised randomly
-    "search/aggregation/250_moving_fn[1]",
-    # body: null
-    "indices/simulate_index_template/10_basic[2]",
-    # No ML node with sufficient capacity / random ML failing
-    "ml/start_stop_datafeed",
-    "ml/post_data",
-    "ml/jobs_crud",
-    "ml/datafeeds_crud",
-    "ml/set_upgrade_mode",
-    "ml/reset_job[2]",
-    "ml/jobs_get_stats",
-    "ml/get_datafeed_stats",
-    "ml/get_trained_model_stats",
-    "ml/delete_job_force",
-    "ml/jobs_get_result_overall_buckets",
-    "ml/bucket_correlation_agg[0]",
-    "ml/job_groups",
-    "transform/transforms_stats_continuous[0]",
-    # Fails bad request instead of 404?
-    "ml/inference_crud",
-    # rollup/security_tests time out?
-    "rollup/security_tests",
-    # Our TLS certs are custom
-    "ssl/10_basic[0]",
-    # Our user is custom
-    "users/10_basic[3]",
-    # License warning not sent?
-    "license/30_enterprise_license[0]",
-    # Shards/snapshots aren't right?
-    "searchable_snapshots/10_usage[1]",
-    # flaky data streams?
-    "data_stream/10_basic[1]",
-    "data_stream/80_resolve_index_data_streams[1]",
-    # bad formatting?
-    "cat/allocation/10_basic",
-    "runtime_fields/10_keyword[8]",
-    # service account number not right?
-    "service_accounts/10_basic[1]",
-    # doesn't use 'contains' properly?
-    "xpack/10_basic[0]",
-    "privileges/40_get_user_privs[0]",
-    "privileges/40_get_user_privs[1]",
-    "features/get_features/10_basic[0]",
-    "features/reset_features/10_basic[0]",
-    # bad use of 'is_false'?
-    "indices/get_alias/10_basic[22]",
-    # unique usage of 'set'
-    "indices/stats/50_disk_usage[0]",
-    "indices/stats/60_field_usage[0]",
-    # actual Elasticsearch failure?
-    "transform/transforms_stats",
-    "transform/transforms_cat_apis",
-    "transform/transforms_update",
+FAILING_TESTS = {
+    # ping has a custom implementation in Python and returns a boolean
+    "ping/ping",
+    # Not investigated yet
+    "cat/aliases",
+    "cat/fielddata",
+    "cluster/delete_voting_config_exclusions",
+    "cluster/voting_config_exclusions",
+    "entsearch/10_basic",
+    "indices/clone",
+    "indices/resolve_cluster",
+    "indices/settings",
+    "indices/split",
+    "indices/simulate_template_stack",
+    "logstash/10_basic",
+    "machine_learning/30_trained_model_stack",
+    "machine_learning/jobs_crud",
+    "scroll/10_basic",
+    "security/10_api_key_basic",
+    "transform/10_basic",
+}
+SKIPPED_TESTS = {
+    # Timeouts
+    # https://github.com/elastic/elasticsearch-serverless-python/issues/63
+    "cluster/cluster_info[0]",
+    "inference/10_basic[0]",
+    "machine_learning/20_trained_model[0]",
 }


 XPACK_FEATURES = None
-ES_VERSION = None
 RUN_ASYNC_REST_API_TESTS = os.environ.get("PYTHON_CONNECTION_CLASS") == "requests"

 FALSEY_VALUES = ("", None, False, 0, 0.0)
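
The "ping/ping" entry in FAILING_TESTS above is expected to fail because the Python client's ping() swallows the usual transport and API errors and reports reachability as a boolean, so a YAML test asserting on the raw response cannot be mapped onto it. A minimal sketch of that behaviour (the URL is a placeholder):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder URL

    # ping() returns True/False instead of raising for ordinary connection
    # or API errors, which is why the YAML ping test is marked as failing.
    if client.ping():
        print("cluster is reachable")
    else:
        print("cluster is unreachable or returned an error")
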
@@ -173,16 +142,6 @@ def teardown(self):
         self.section("teardown")
         self.run_code(self._teardown_code)

-    def es_version(self):
-        global ES_VERSION
-        if ES_VERSION is None:
-            version_string = (self.client.info())["version"]["number"]
-            if "." not in version_string:
-                return ()
-            version = version_string.strip().split(".")
-            ES_VERSION = tuple(int(v) if v.isdigit() else 999 for v in version)
-        return ES_VERSION
-
     def section(self, name):
         print(("=" * 10) + " " + name + " " + ("=" * 10))
@@ -331,16 +290,6 @@ def run_skip(self, skip):
                     continue
                 pytest.skip(f"feature '{feature}' is not supported")

-        if "version" in skip:
-            version, reason = skip["version"], skip["reason"]
-            if version == "all":
-                pytest.skip(reason)
-            min_version, _, max_version = version.partition("-")
-            min_version = parse_version(min_version.strip()) or (0,)
-            max_version = parse_version(max_version.strip()) or (999,)
-            if min_version <= (self.es_version()) <= max_version:
-                pytest.skip(reason)
-
     def run_gt(self, action):
         for key, value in action.items():
             value = self._resolve(value)
@@ -516,8 +465,9 @@ def _skip_intentional_type_errors(self, e: Exception):


 @pytest.fixture(scope="function")
-def sync_runner(sync_client):
-    return YamlRunner(sync_client)
+def sync_runner(sync_client_factory):
+    # sync_client_factory does not wipe the cluster between tests
+    return YamlRunner(sync_client_factory)


 # Source: https://stackoverflow.com/a/37958106/5763213
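
The NoDatesSafeLoader used in the hunk below (built via the remove_implicit_resolver helper named in the next header, following the Stack Overflow answer cited above) keeps date-like YAML values as plain strings. A rough standalone sketch of the same idea, assuming stock PyYAML:

    import yaml

    class NoDatesSafeLoader(yaml.SafeLoader):
        pass

    # Drop PyYAML's implicit timestamp resolver so "2024-01-01" loads as the
    # string "2024-01-01" rather than a datetime.date object.
    NoDatesSafeLoader.yaml_implicit_resolvers = {
        first_char: [(tag, regexp) for tag, regexp in resolvers
                     if tag != "tag:yaml.org,2002:timestamp"]
        for first_char, resolvers in yaml.SafeLoader.yaml_implicit_resolvers.items()
    }

    assert yaml.load("d: 2024-01-01", Loader=NoDatesSafeLoader) == {"d": "2024-01-01"}
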
@@ -546,77 +496,54 @@ def remove_implicit_resolver(cls, tag_to_remove):
 try:
     # Construct the HTTP and Elasticsearch client
     http = urllib3.PoolManager(retries=10)
-    client = Elasticsearch(es_url(), request_timeout=3, ca_certs=CA_CERTS)
-
-    # Make a request to Elasticsearch for the build hash, we'll be looking for
-    # an artifact with this same hash to download test specs for.
-    client_info = client.info()
-    version_number = client_info["version"]["number"]
-    build_hash = client_info["version"]["build_hash"]
-
-    # Now talk to the artifacts API with the 'STACK_VERSION' environment variable
-    resp = http.request(
-        "GET",
-        f"https://artifacts-api.elastic.co/v1/versions/{version_number}",
+
+    yaml_tests_url = (
+        "https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/main"
     )
-    resp = json.loads(resp.data.decode("utf-8"))
-
-    # Look through every build and see if one matches the commit hash
-    # we're looking for. If not it's okay, we'll just use the latest and
-    # hope for the best!
-    builds = resp["version"]["builds"]
-    for build in builds:
-        if build["projects"]["elasticsearch"]["commit_hash"] == build_hash:
-            break
-    else:
-        build = builds[0]  # Use the latest
-
-    # Now we're looking for the 'rest-api-spec-<VERSION>-sources.jar' file
-    # to download and extract in-memory.
-    packages = build["projects"]["elasticsearch"]["packages"]
-    for package in packages:
-        if re.match(r"rest-resources-zip-.*\.zip", package):
-            package_url = packages[package]["url"]
-            break
-    else:
-        raise RuntimeError(
-            f"Could not find the package 'rest-resources-zip-*.zip' in build {build!r}"
-        )

     # Download the zip and start reading YAML from the files in memory
-    package_zip = zipfile.ZipFile(io.BytesIO(http.request("GET", package_url).data))
+    package_zip = zipfile.ZipFile(io.BytesIO(http.request("GET", yaml_tests_url).data))
+
     for yaml_file in package_zip.namelist():
-        if not re.match(r"^rest-api-spec/test/.*\.ya?ml$", yaml_file):
+        if not re.match(r"^.*\/tests\/.*\.ya?ml$", yaml_file):
             continue
         yaml_tests = list(
             yaml.load_all(package_zip.read(yaml_file), Loader=NoDatesSafeLoader)
         )

-        # Each file may have a "test" named 'setup' or 'teardown',
-        # these sets of steps should be run at the beginning and end
-        # of every other test within the file so we do one pass to capture those.
-        setup_steps = teardown_steps = None
+        # Each file has a `requires` section with `serverless` and `stack`
+        # boolean entries indicating whether the test should run with
+        # serverless, stack or both. Additionally, each file may have a section
+        # named 'setup' or 'teardown', these sets of steps should be run at the
+        # beginning and end of every other test within the file so we do one
+        # pass to capture those.
+        requires = setup_steps = teardown_steps = None
         test_numbers_and_steps = []
         test_number = 0

         for yaml_test in yaml_tests:
             test_name, test_step = yaml_test.popitem()
-            if test_name == "setup":
+            if test_name == "requires":
+                requires = test_step
+            elif test_name == "setup":
                 setup_steps = test_step
             elif test_name == "teardown":
                 teardown_steps = test_step
             else:
                 test_numbers_and_steps.append((test_number, test_step))
                 test_number += 1

+        if not requires["stack"]:
+            continue
+
         # Now we combine setup, teardown, and test_steps into
         # a set of pytest.param() instances
         for test_number, test_step in test_numbers_and_steps:
-            # Build the id from the name of the YAML file and
-            # the number within that file. Most important step
-            # is to remove most of the file path prefixes and
-            # the .yml suffix.
-            pytest_test_name = yaml_file.rpartition(".")[0].replace(".", "/")
+            # Build the id from the name of the YAML file and the number within
+            # that file. Most important step is to remove most of the file path
+            # prefixes and the .yml suffix.
+            test_path = "/".join(yaml_file.split("/")[2:])
+            pytest_test_name = test_path.rpartition(".")[0].replace(".", "/")
             for prefix in ("rest-api-spec/", "test/", "free/", "platinum/"):
                 if pytest_test_name.startswith(prefix):
                     pytest_test_name = pytest_test_name[len(prefix) :]
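
For context, the loop above assumes each YAML file in the suite is a multi-document stream whose documents have exactly one top-level key: the `requires` document gates the whole file, while `setup` and `teardown` wrap every other test. A small sketch of that layout with invented file content and step names:

    import textwrap

    import yaml

    # Hypothetical test file; the index name and steps are made up for illustration.
    example_file = textwrap.dedent(
        """
        requires:
          serverless: true
          stack: true
        ---
        setup:
          - do: {indices.create: {index: my-index}}
        ---
        teardown:
          - do: {indices.delete: {index: my-index}}
        ---
        Index and fetch a document:
          - do: {index: {index: my-index, id: "1", body: {foo: bar}}}
          - do: {get: {index: my-index, id: "1"}}
          - match: {_source.foo: bar}
        """
    )

    for doc in yaml.safe_load_all(example_file):
        name, content = doc.popitem()  # exactly one top-level key per document
        print(name, "->", content)
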
@@ -628,7 +555,9 @@ def remove_implicit_resolver(cls, tag_to_remove):
628555 "teardown" : teardown_steps ,
629556 }
630557 # Skip either 'test_name' or 'test_name[x]'
631- if pytest_test_name in SKIP_TESTS or pytest_param_id in SKIP_TESTS :
558+ if pytest_test_name in FAILING_TESTS or pytest_param_id in FAILING_TESTS :
559+ pytest_param ["fail" ] = True
560+ elif pytest_test_name in SKIPPED_TESTS or pytest_param_id in SKIPPED_TESTS :
632561 pytest_param ["skip" ] = True
633562
634563 YAML_TEST_SPECS .append (pytest .param (pytest_param , id = pytest_param_id ))
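
The membership check just above accepts either a whole file id ("indices/clone") or a single test within a file ("cluster/cluster_info[0]"). A tiny self-contained sketch of that lookup, with invented entries:

    FAILING_TESTS = {"indices/clone"}            # every test in the file
    SKIPPED_TESTS = {"cluster/cluster_info[0]"}  # only the first test in the file

    def classify(test_name, test_number):
        param_id = f"{test_name}[{test_number}]"
        if test_name in FAILING_TESTS or param_id in FAILING_TESTS:
            return "xfail"
        if test_name in SKIPPED_TESTS or param_id in SKIPPED_TESTS:
            return "skip"
        return "run"

    assert classify("indices/clone", 2) == "xfail"
    assert classify("cluster/cluster_info", 0) == "skip"
    assert classify("cluster/cluster_info", 1) == "run"
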
@@ -645,12 +574,13 @@ def _pytest_param_sort_key(param: pytest.param) -> Tuple[Union[str, int], ...]:
 # Sort the tests by ID so they're grouped together nicely.
 YAML_TEST_SPECS = sorted(YAML_TEST_SPECS, key=_pytest_param_sort_key)

-
 if not RUN_ASYNC_REST_API_TESTS:

     @pytest.mark.parametrize("test_spec", YAML_TEST_SPECS)
     def test_rest_api_spec(test_spec, sync_runner):
-        if test_spec.get("skip", False):
-            pytest.skip("Manually skipped in 'SKIP_TESTS'")
+        if test_spec.get("fail", False):
+            pytest.xfail("Manually marked as failing in 'FAILING_TESTS'")
+        elif test_spec.get("skip", False):
+            pytest.skip("Manually marked as skipped")
         sync_runner.use_spec(test_spec)
         sync_runner.run()
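
Using pytest.xfail for FAILING_TESTS rather than pytest.skip keeps the known failures visible as "xfailed" in the test summary instead of blending them into the skipped tests. A minimal standalone illustration of the two outcomes:

    import pytest

    def test_known_failure():
        # Reported as 'xfailed'; the call stops the test immediately.
        pytest.xfail("Manually marked as failing in 'FAILING_TESTS'")

    def test_not_supported():
        # Reported as 'skipped' with the given reason.
        pytest.skip("Manually marked as skipped")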