@@ -847,56 +847,6 @@ def test_count_tokens_smoke(self, kwargs):
             {"total_tokens": 7},
         )
 
-    @parameterized.named_parameters(
-        [
-            "GenerateContentResponse",
-            generation_types.GenerateContentResponse,
-            generation_types.AsyncGenerateContentResponse,
-        ],
-        [
-            "GenerativeModel.generate_response",
-            generative_models.GenerativeModel.generate_content,
-            generative_models.GenerativeModel.generate_content_async,
-        ],
-        [
-            "GenerativeModel.count_tokens",
-            generative_models.GenerativeModel.count_tokens,
-            generative_models.GenerativeModel.count_tokens_async,
-        ],
-        [
-            "ChatSession.send_message",
-            generative_models.ChatSession.send_message,
-            generative_models.ChatSession.send_message_async,
-        ],
-        [
-            "ChatSession._handle_afc",
-            generative_models.ChatSession._handle_afc,
-            generative_models.ChatSession._handle_afc_async,
-        ],
-    )
-    def test_async_code_match(self, obj, aobj):
-        import inspect
-        import re
-
-        source = inspect.getsource(obj)
-        asource = inspect.getsource(aobj)
-
-        source = re.sub('""".*"""', "", source, flags=re.DOTALL)
-        asource = re.sub('""".*"""', "", asource, flags=re.DOTALL)
-
-        asource = (
-            asource.replace("anext", "next")
-            .replace("aiter", "iter")
-            .replace("_async", "")
-            .replace("async ", "")
-            .replace("await ", "")
-            .replace("Async", "")
-            .replace("ASYNC_", "")
-        )
-
-        asource = re.sub(" *?# type: ignore", "", asource)
-        self.assertEqual(source, asource, f"error in {obj=}")
-
     def test_repr_for_unary_non_streamed_response(self):
         model = generative_models.GenerativeModel(model_name="gemini-pro")
         self.responses["generate_content"].append(simple_response("world!"))