|
46 | 46 | from huggingface_hub.inference._providers.novita import NovitaConversationalTask, NovitaTextGenerationTask |
47 | 47 | from huggingface_hub.inference._providers.nscale import NscaleConversationalTask, NscaleTextToImageTask |
48 | 48 | from huggingface_hub.inference._providers.openai import OpenAIConversationalTask |
| 49 | +from huggingface_hub.inference._providers.ovhcloud import OVHcloudAIEndpointsAutomaticSpeechRecognitionTask, OVHcloudAIEndpointsConversationalTask, OVHcloudAIEndpointsFeatureExtractionTask, OVHcloudAIEndpointsTextToImageTask |
49 | 50 | from huggingface_hub.inference._providers.publicai import PublicAIConversationalTask |
50 | 51 | from huggingface_hub.inference._providers.replicate import ( |
51 | 52 | ReplicateImageToImageTask, |
@@ -1422,6 +1423,142 @@ def test_prepare_url(self): |
1422 | 1423 | assert helper._prepare_url("sk-XXXXXX", "gpt-4o-mini") == "https://api.openai.com/v1/chat/completions" |
1423 | 1424 |
|
1424 | 1425 |
|
class TestOVHcloudAIEndpointsProvider:
    """Tests for the OVHcloud AI Endpoints inference provider helpers.

    Covers the four supported tasks (conversational, feature-extraction,
    text-to-image, automatic-speech-recognition). Each helper is expected to:
    - route through the HF router URL when given an ``hf_``-prefixed token,
      and hit OVHcloud's endpoint directly with a provider token;
    - build an OpenAI-compatible payload keyed on the provider-side model id
      (``providerId``), not the HF model id.
    """

    def test_prepare_hf_url_conversational(self):
        # An "hf_" token must route through the Hugging Face router.
        helper = OVHcloudAIEndpointsConversationalTask()
        url = helper._prepare_url("hf_token", "username/repo_name")
        assert url == "https://router.huggingface.co/ovhcloud/v1/chat/completions"

    def test_prepare_url_conversational(self):
        # A provider token must call OVHcloud's endpoint directly.
        helper = OVHcloudAIEndpointsConversationalTask()
        url = helper._prepare_url("ovhcloud_token", "username/repo_name")
        assert url == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions"

    def test_prepare_payload_as_dict(self):
        # Chat payload: messages and sampling parameters are forwarded as-is,
        # and "model" is set to the provider-side id (providerId).
        helper = OVHcloudAIEndpointsConversationalTask()
        payload = helper._prepare_payload_as_dict(
            [
                {"role": "system", "content": "You are a helpful assistant"},
                {"role": "user", "content": "Hello!"},
            ],
            {
                "max_tokens": 512,
                "temperature": 0.15,
                "top_p": 1,
                "presence_penalty": 0,
                "stream": True,
            },
            InferenceProviderMapping(
                provider="ovhcloud",
                hf_model_id="meta-llama/Llama-3.1-8B-Instruct",
                providerId="Llama-3.1-8B-Instruct",
                task="conversational",
                status="live",
            ),
        )
        assert payload == {
            "max_tokens": 512,
            "messages": [
                {"content": "You are a helpful assistant", "role": "system"},
                {"role": "user", "content": "Hello!"},
            ],
            "model": "Llama-3.1-8B-Instruct",
            "presence_penalty": 0,
            "stream": True,
            "temperature": 0.15,
            "top_p": 1,
        }

    def test_prepare_url_feature_extraction(self):
        helper = OVHcloudAIEndpointsFeatureExtractionTask()
        assert (
            helper._prepare_url("hf_token", "username/repo_name")
            == "https://router.huggingface.co/ovhcloud/v1/embeddings"
        )

    def test_prepare_payload_as_dict_feature_extraction(self):
        # Embeddings payload: input text under "input", extra parameters
        # (here "truncate") merged at the top level.
        helper = OVHcloudAIEndpointsFeatureExtractionTask()
        payload = helper._prepare_payload_as_dict(
            "Example text to embed",
            {"truncate": True},
            InferenceProviderMapping(
                provider="ovhcloud",
                hf_model_id="BAAI/bge-m3",
                providerId="BGE-M3",
                task="feature-extraction",
                status="live",
            ),
        )
        assert payload == {"input": "Example text to embed", "model": "BGE-M3", "truncate": True}

    def test_prepare_url_text_to_image(self):
        # Both routing modes are checked in one test for this task.
        helper = OVHcloudAIEndpointsTextToImageTask()
        assert (
            helper._prepare_url("hf_token", "username/repo_name")
            == "https://router.huggingface.co/ovhcloud/v1/images/generations"
        )

        url = helper._prepare_url("ovhcloud_token", "username/repo_name")
        assert url == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/images/generations"

    def test_prepare_payload_as_dict_text_to_image(self):
        # Image payload: the prompt goes under "prompt"; empty parameters add
        # nothing beyond the provider-side model id.
        helper = OVHcloudAIEndpointsTextToImageTask()
        payload = helper._prepare_payload_as_dict(
            "a beautiful cat",
            {},
            InferenceProviderMapping(
                provider="ovhcloud",
                hf_model_id="stabilityai/stable-diffusion-xl-base-1.0",
                providerId="stable-diffusion-xl-base-v10",
                task="text-to-image",
                status="live",
            ),
        )
        assert payload == {
            "prompt": "a beautiful cat",
            "model": "stable-diffusion-xl-base-v10",
        }

    def test_text_to_image_get_response(self):
        # The provider returns base64-encoded image data; get_response must
        # decode it back to raw bytes.
        helper = OVHcloudAIEndpointsTextToImageTask()
        response = helper.get_response({"data": [{"b64_json": base64.b64encode(b"image_bytes").decode()}]})
        assert response == b"image_bytes"

    def test_prepare_url_automatic_speech_recognition(self):
        # Both routing modes are checked in one test for this task.
        helper = OVHcloudAIEndpointsAutomaticSpeechRecognitionTask()
        assert (
            helper._prepare_url("hf_token", "username/repo_name")
            == "https://router.huggingface.co/ovhcloud/v1/audio/transcriptions"
        )

        url = helper._prepare_url("ovhcloud_token", "username/repo_name")
        assert url == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/audio/transcriptions"

    def test_prepare_payload_as_dict_automatic_speech_recognition(self):
        # ASR payload: the base64 data-URI audio is passed through unchanged
        # under "file", alongside the provider-side model id.
        helper = OVHcloudAIEndpointsAutomaticSpeechRecognitionTask()

        payload = helper._prepare_payload_as_dict(
            f"data:audio/mpeg;base64,{base64.b64encode(b'dummy_audio_data').decode()}",
            {},
            InferenceProviderMapping(
                provider="ovhcloud",
                hf_model_id="openai/whisper-large-v3",
                providerId="whisper-large-v3",
                task="automatic-speech-recognition",
                status="live",
            ),
        )
        assert payload == {
            "file": f"data:audio/mpeg;base64,{base64.b64encode(b'dummy_audio_data').decode()}",
            "model": "whisper-large-v3",
        }

    def test_automatic_speech_recognition_get_response(self):
        # Transcription responses expose the text under the "text" key.
        helper = OVHcloudAIEndpointsAutomaticSpeechRecognitionTask()
        response = helper.get_response({"text": "Hello world"})
        assert response == "Hello world"
| 1561 | + |
1425 | 1562 | class TestReplicateProvider: |
1426 | 1563 | def test_prepare_headers(self): |
1427 | 1564 | helper = ReplicateTask("text-to-image") |
|
0 commit comments