
Commit 8adc6b2

test: remove deprecated claude-3-opus tests (#2375)
1 parent 34b5975 commit 8adc6b2

File tree

2 files changed: +9 −9 lines changed

bigframes/ml/llm.py

Lines changed: 1 addition & 1 deletion
@@ -873,7 +873,7 @@ class Claude3TextGenerator(base.RetriableRemotePredictor):
 "claude-3-sonnet" (deprecated) is Anthropic's dependable combination of skills and speed. It is engineered to be dependable for scaled AI deployments across a variety of use cases.
 "claude-3-haiku" is Anthropic's fastest, most compact vision and text model for near-instant responses to simple queries, meant for seamless AI experiences mimicking human interactions.
 "claude-3-5-sonnet" is Anthropic's most powerful AI model and maintains the speed and cost of Claude 3 Sonnet, which is a mid-tier model.
-"claude-3-opus" is Anthropic's second-most powerful AI model, with strong performance on highly complex tasks.
+"claude-3-opus" (deprecated) is Anthropic's second-most powerful AI model, with strong performance on highly complex tasks.
 https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#available-claude-models
 If no setting is provided, "claude-3-sonnet" will be used by default
 and a warning will be issued.
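
A minimal usage sketch (not part of this commit), based on the docstring above and the constructor call visible in the tests below; the connection name, prompt column, and default-session setup are illustrative assumptions:

```python
# Illustrative sketch only -- not from this commit. The connection name and the
# "prompt" column are placeholders/assumptions.
import bigframes.pandas as bpd
from bigframes.ml import llm

# Pick one of the models that remains undeprecated in the docstring above;
# omitting model_name falls back to "claude-3-sonnet" and emits a warning.
model = llm.Claude3TextGenerator(
    model_name="claude-3-haiku",
    connection_name="my-project.us.my-connection",  # placeholder BigQuery connection
)

df = bpd.DataFrame({"prompt": ["Summarize BigQuery DataFrames in one sentence."]})
result = model.predict(df)  # DataFrame containing the generated text
print(result.head())
```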

tests/system/load/test_llm.py

Lines changed: 8 additions & 8 deletions
@@ -100,13 +100,13 @@ def test_llm_gemini_w_ground_with_google_search(llm_remote_text_df):
 # (b/366290533): Claude models are of extremely low capacity. The tests should reside in small tests. Moving these here just to protect BQML's shared capacity(as load test only runs once per day.) and make sure we still have minimum coverage.
 @pytest.mark.parametrize(
     "model_name",
-    ("claude-3-haiku", "claude-3-5-sonnet", "claude-3-opus"),
+    ("claude-3-haiku", "claude-3-5-sonnet"),
 )
 @pytest.mark.flaky(retries=3, delay=120)
 def test_claude3_text_generator_create_load(
     dataset_id, model_name, session, session_us_east5, bq_connection
 ):
-    if model_name in ("claude-3-5-sonnet", "claude-3-opus"):
+    if model_name in ("claude-3-5-sonnet",):
         session = session_us_east5
     claude3_text_generator_model = llm.Claude3TextGenerator(
         model_name=model_name, connection_name=bq_connection, session=session
@@ -125,13 +125,13 @@ def test_claude3_text_generator_create_load(

 @pytest.mark.parametrize(
     "model_name",
-    ("claude-3-haiku", "claude-3-5-sonnet", "claude-3-opus"),
+    ("claude-3-haiku", "claude-3-5-sonnet"),
 )
 @pytest.mark.flaky(retries=3, delay=120)
 def test_claude3_text_generator_predict_default_params_success(
     llm_text_df, model_name, session, session_us_east5, bq_connection
 ):
-    if model_name in ("claude-3-5-sonnet", "claude-3-opus"):
+    if model_name in ("claude-3-5-sonnet",):
         session = session_us_east5
     claude3_text_generator_model = llm.Claude3TextGenerator(
         model_name=model_name, connection_name=bq_connection, session=session
@@ -144,13 +144,13 @@ def test_claude3_text_generator_predict_default_params_success(

 @pytest.mark.parametrize(
     "model_name",
-    ("claude-3-haiku", "claude-3-5-sonnet", "claude-3-opus"),
+    ("claude-3-haiku", "claude-3-5-sonnet"),
 )
 @pytest.mark.flaky(retries=3, delay=120)
 def test_claude3_text_generator_predict_with_params_success(
     llm_text_df, model_name, session, session_us_east5, bq_connection
 ):
-    if model_name in ("claude-3-5-sonnet", "claude-3-opus"):
+    if model_name in ("claude-3-5-sonnet",):
         session = session_us_east5
     claude3_text_generator_model = llm.Claude3TextGenerator(
         model_name=model_name, connection_name=bq_connection, session=session
@@ -165,13 +165,13 @@ def test_claude3_text_generator_predict_with_params_success(

 @pytest.mark.parametrize(
     "model_name",
-    ("claude-3-haiku", "claude-3-5-sonnet", "claude-3-opus"),
+    ("claude-3-haiku", "claude-3-5-sonnet"),
 )
 @pytest.mark.flaky(retries=3, delay=120)
 def test_claude3_text_generator_predict_multi_col_success(
     llm_text_df, model_name, session, session_us_east5, bq_connection
 ):
-    if model_name in ("claude-3-5-sonnet", "claude-3-opus"):
+    if model_name in ("claude-3-5-sonnet",):
         session = session_us_east5

     llm_text_df["additional_col"] = 1
