|
46 | 46 | from huggingface_hub.inference._providers.novita import NovitaConversationalTask, NovitaTextGenerationTask |
47 | 47 | from huggingface_hub.inference._providers.nscale import NscaleConversationalTask, NscaleTextToImageTask |
48 | 48 | from huggingface_hub.inference._providers.openai import OpenAIConversationalTask |
49 | | -from huggingface_hub.inference._providers.ovhcloud import OVHcloudAIEndpointsAutomaticSpeechRecognitionTask, OVHcloudAIEndpointsConversationalTask, OVHcloudAIEndpointsFeatureExtractionTask, OVHcloudAIEndpointsTextToImageTask |
| 49 | +from huggingface_hub.inference._providers.ovhcloud import ( |
| 50 | + OVHcloudAIEndpointsConversationalTask, |
| 51 | + OVHcloudAIEndpointsTextGenerationTask, |
| 52 | +) |
50 | 53 | from huggingface_hub.inference._providers.publicai import PublicAIConversationalTask |
51 | 54 | from huggingface_hub.inference._providers.replicate import ( |
52 | 55 | ReplicateAutomaticSpeechRecognitionTask, |
@@ -1470,94 +1473,62 @@ def test_prepare_payload_as_dict(self): |
1470 | 1473 | "top_p": 1, |
1471 | 1474 | } |
1472 | 1475 |
|
1473 | | - def test_prepare_url_feature_extraction(self): |
1474 | | - helper = OVHcloudAIEndpointsFeatureExtractionTask() |
1475 | | - assert ( |
1476 | | - helper._prepare_url("hf_token", "username/repo_name") |
1477 | | - == "https://router.huggingface.co/ovhcloud/v1/embeddings" |
1478 | | - ) |
| 1476 | + def test_prepare_route_conversational(self): |
| 1477 | + helper = OVHcloudAIEndpointsConversationalTask() |
| 1478 | + assert helper._prepare_route("username/repo_name", "hf_token") == "/v1/chat/completions" |
1479 | 1479 |
|
1480 | | - def test_prepare_payload_as_dict_feature_extraction(self): |
1481 | | - helper = OVHcloudAIEndpointsFeatureExtractionTask() |
1482 | | - payload = helper._prepare_payload_as_dict( |
1483 | | - "Example text to embed", |
1484 | | - {"truncate": True}, |
1485 | | - InferenceProviderMapping( |
1486 | | - provider="ovhcloud", |
1487 | | - hf_model_id="BAAI/bge-m3", |
1488 | | - providerId="BGE-M3", |
1489 | | - task="feature-extraction", |
1490 | | - status="live", |
1491 | | - ), |
1492 | | - ) |
1493 | | - assert payload == {"input": "Example text to embed", "model": "BGE-M3", "truncate": True} |
| 1480 | + def test_prepare_url_text_generation(self): |
| 1481 | + helper = OVHcloudAIEndpointsTextGenerationTask() |
| 1482 | + url = helper._prepare_url("hf_token", "username/repo_name") |
| 1483 | + assert url == "https://router.huggingface.co/ovhcloud/v1/completions"
1494 | 1484 |
|
1495 | | - def test_prepare_url_text_to_image(self): |
1496 | | - helper = OVHcloudAIEndpointsTextToImageTask() |
1497 | | - assert ( |
1498 | | - helper._prepare_url("hf_token", "username/repo_name") |
1499 | | - == "https://router.huggingface.co/ovhcloud/v1/images/generations" |
1500 | | - ) |
1501 | | - |
1502 | 1485 | url = helper._prepare_url("ovhcloud_token", "username/repo_name") |
1503 | | - assert url == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/images/generations" |
1504 | | - |
1505 | | - def test_prepare_payload_as_dict_text_to_image(self): |
1506 | | - helper = OVHcloudAIEndpointsTextToImageTask() |
1507 | | - payload = helper._prepare_payload_as_dict( |
1508 | | - inputs="a beautiful cat", |
1509 | | - provider_mapping_info=InferenceProviderMapping( |
1510 | | - provider="ovhcloud", |
1511 | | - hf_model_id="stabilityai/stable-diffusion-xl-base-1.0", |
1512 | | - providerId="stable-diffusion-xl-base-v10", |
1513 | | - task="text-to-image", |
1514 | | - status="live", |
1515 | | - ), |
1516 | | - parameters={} |
1517 | | - ) |
1518 | | - assert payload == { |
1519 | | - "prompt": "a beautiful cat", |
1520 | | - "model": "stable-diffusion-xl-base-v10", |
1521 | | - } |
1522 | | - |
1523 | | - def test_text_to_image_get_response(self): |
1524 | | - helper = OVHcloudAIEndpointsTextToImageTask() |
1525 | | - response = helper.get_response({"data": [{"b64_json": base64.b64encode(b"image_bytes").decode()}]}) |
1526 | | - assert response == b"image_bytes" |
| 1486 | + assert url == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/completions"
1527 | 1487 |
|
1528 | | - def test_prepare_url_automatic_speech_recognition(self): |
1529 | | - helper = OVHcloudAIEndpointsAutomaticSpeechRecognitionTask() |
1530 | | - assert ( |
1531 | | - helper._prepare_url("hf_token", "username/repo_name") |
1532 | | - == "https://router.huggingface.co/ovhcloud/v1/audio/transcriptions" |
1533 | | - ) |
1534 | | - |
1535 | | - url = helper._prepare_url("ovhcloud_token", "username/repo_name") |
1536 | | - assert url == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/audio/transcriptions" |
| 1488 | + def test_prepare_route_text_generation(self): |
| 1489 | + helper = OVHcloudAIEndpointsTextGenerationTask() |
| 1490 | + assert helper._prepare_route("username/repo_name", "hf_token") == "/v1/completions"
1537 | 1491 |
|
1538 | | - def test_prepare_payload_as_dict_automatic_speech_recognition(self): |
1539 | | - helper = OVHcloudAIEndpointsAutomaticSpeechRecognitionTask() |
1540 | | - |
| 1492 | + def test_prepare_payload_as_dict_text_generation(self): |
| 1493 | + helper = OVHcloudAIEndpointsTextGenerationTask() |
1541 | 1494 | payload = helper._prepare_payload_as_dict( |
1542 | | - f"data:audio/mpeg;base64,{base64.b64encode(b'dummy_audio_data').decode()}", |
1543 | | - {}, |
| 1495 | + "Once upon a time", |
| 1496 | + {"temperature": 0.7, "max_tokens": 100}, |
1544 | 1497 | InferenceProviderMapping( |
1545 | 1498 | provider="ovhcloud", |
1546 | | - hf_model_id="openai/whisper-large-v3", |
1547 | | - providerId="whisper-large-v3", |
1548 | | - task="automatic-speech-recognition", |
| 1499 | + hf_model_id="meta-llama/Llama-3.1-8B-Instruct", |
| 1500 | + providerId="Llama-3.1-8B-Instruct", |
| 1501 | + task="text-generation", |
1549 | 1502 | status="live", |
1550 | 1503 | ), |
1551 | 1504 | ) |
1552 | 1505 | assert payload == { |
1553 | | - "file": f"data:audio/mpeg;base64,{base64.b64encode(b'dummy_audio_data').decode()}", |
1554 | | - "model": "whisper-large-v3", |
| 1506 | + "prompt": "Once upon a time", |
| 1507 | + "temperature": 0.7, |
| 1508 | + "max_tokens": 100, |
| 1509 | + "model": "Llama-3.1-8B-Instruct", |
1555 | 1510 | } |
1556 | 1511 |
|
1557 | | - def test_automatic_speech_recognition_get_response(self): |
1558 | | - helper = OVHcloudAIEndpointsAutomaticSpeechRecognitionTask() |
1559 | | - response = helper.get_response({"text": "Hello world"}) |
1560 | | - assert response == "Hello world" |
| 1512 | + def test_text_generation_get_response(self): |
| 1513 | + helper = OVHcloudAIEndpointsTextGenerationTask() |
| 1514 | + response = helper.get_response( |
| 1515 | + { |
| 1516 | + "choices": [ |
| 1517 | + { |
| 1518 | + "text": " there was a beautiful princess", |
| 1519 | + "finish_reason": "stop", |
| 1520 | + "seed": 42, |
| 1521 | + } |
| 1522 | + ] |
| 1523 | + } |
| 1524 | + ) |
| 1525 | + assert response == { |
| 1526 | + "generated_text": " there was a beautiful princess", |
| 1527 | + "details": { |
| 1528 | + "finish_reason": "stop", |
| 1529 | + "seed": 42, |
| 1530 | + }, |
| 1531 | + } |
1561 | 1532 |
|
1562 | 1533 |
|
1563 | 1534 | class TestReplicateProvider: |
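For reference, the assertions above imply a small OpenAI-compatible helper. Below is a minimal, standalone sketch of what such an OVHcloud text-generation helper could look like. It is illustrative only: the class name, the constant names, and the `provider_id` attribute on the mapping object are assumptions, and the real implementation in `huggingface_hub.inference._providers.ovhcloud` may be structured differently (for example, built on a shared base task class).

    # Standalone sketch (hypothetical names) of a helper that would satisfy the
    # OVHcloud text-generation assertions in the tests above.
    _OVHCLOUD_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net"
    _HF_ROUTER_URL = "https://router.huggingface.co"

    class OVHcloudTextGenerationSketch:
        def _prepare_route(self, mapped_model: str, api_key: str) -> str:
            # Plain completions endpoint; the conversational task targets
            # /v1/chat/completions instead.
            return "/v1/completions"

        def _prepare_url(self, api_key: str, mapped_model: str) -> str:
            # Hugging Face tokens are proxied through the HF router; a provider key
            # calls the OVHcloud AI Endpoints base URL directly.
            base = f"{_HF_ROUTER_URL}/ovhcloud" if api_key.startswith("hf_") else _OVHCLOUD_BASE_URL
            return base + self._prepare_route(mapped_model, api_key)

        def _prepare_payload_as_dict(self, inputs, parameters, provider_mapping_info) -> dict:
            # Forward generation parameters unchanged and substitute the provider-side
            # model id (assumed to be exposed as `provider_id` on the mapping object).
            return {"prompt": inputs, **parameters, "model": provider_mapping_info.provider_id}

        def get_response(self, response: dict) -> dict:
            # Map the OpenAI-style completion choice back to the text-generation output
            # shape checked by test_text_generation_get_response.
            choice = response["choices"][0]
            return {
                "generated_text": choice["text"],
                "details": {"finish_reason": choice.get("finish_reason"), "seed": choice.get("seed")},
            }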
|
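At the user level these task helpers are exercised indirectly through `InferenceClient`. A hedged usage sketch follows ("hf_xxx" is a placeholder token; the model id is the one used in the tests above):

    from huggingface_hub import InferenceClient

    # With an hf_ token the request is proxied through the HF router; an OVHcloud
    # key would hit the provider endpoint directly.
    client = InferenceClient(provider="ovhcloud", api_key="hf_xxx")
    output = client.text_generation(
        "Once upon a time",
        model="meta-llama/Llama-3.1-8B-Instruct",
        temperature=0.7,
        max_new_tokens=100,
    )
    print(output)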