# This file was auto-generated by Fern from our API Definition.

import typing
from ..core.client_wrapper import SyncClientWrapper
from ..core.request_options import RequestOptions
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from ..core.unchecked_base_model import construct_type
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
from ..core.client_wrapper import AsyncClientWrapper

# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
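# Note (editorial, hedged): OMIT is a sentinel distinct from None. Fields left as OMIT are
# expected to be dropped from the request payload by the client wrapper (see omit=OMIT below),
# whereas an explicit None is serialized and sent to the API (e.g. duration_seconds=None asks
# the service to infer the duration).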


class TextToSoundEffectsClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def convert(
        self,
        *,
        text: str,
        duration_seconds: typing.Optional[float] = OMIT,
        prompt_influence: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[bytes]:
        """
        Converts a text prompt into a sound effect.

        Parameters
        ----------
        text : str
            The text that will be converted into a sound effect.

        duration_seconds : typing.Optional[float]
            The duration of the generated sound, in seconds. Must be at least 0.5 and at most 22. If set to None, the optimal duration is inferred from the prompt. Defaults to None.

        prompt_influence : typing.Optional[float]
            A higher prompt influence makes the generation follow the prompt more closely, while also making it less variable. Must be a value between 0 and 1. Defaults to 0.3.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in options such as `chunk_size` to customize the request and response.

        Yields
        ------
        typing.Iterator[bytes]
            Successful Response

        Examples
        --------
        from neuralaudio import NeuralAudio

        client = NeuralAudio(
            api_key="YOUR_API_KEY",
        )
        client.text_to_sound_effects.convert(
            text="Spacious braam suitable for high-impact movie trailer moments",
        )
        """
        with self._client_wrapper.httpx_client.stream(
            "v1/sound-generation",
            method="POST",
            json={
                "text": text,
                "duration_seconds": duration_seconds,
                "prompt_influence": prompt_influence,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:
            try:
                if 200 <= _response.status_code < 300:
                    # Stream the audio back to the caller; chunk_size may be overridden via request_options.
                    _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                    for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
                        yield _chunk
                    return
                # Non-2xx: read the full body so the error payload can be parsed.
                _response.read()
                if _response.status_code == 422:
                    raise UnprocessableEntityError(
                        typing.cast(
                            HttpValidationError,
                            construct_type(
                                type_=HttpValidationError,  # type: ignore
                                object_=_response.json(),
                            ),
                        )
                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
            raise ApiError(status_code=_response.status_code, body=_response_json)
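
# A minimal usage sketch (editorial, not part of the generated client): `convert` returns a
# lazy iterator of audio bytes, so the caller is responsible for consuming it, e.g.:
#
#     from neuralaudio import NeuralAudio
#
#     client = NeuralAudio(api_key="YOUR_API_KEY")
#     with open("effect.mp3", "wb") as f:
#         for chunk in client.text_to_sound_effects.convert(
#             text="Glass shattering in a large hall",
#             duration_seconds=2.0,
#             request_options={"chunk_size": 4096},
#         ):
#             f.write(chunk)
#
# The "effect.mp3" filename and output format are illustrative assumptions; the
# `chunk_size` key mirrors the lookup done in the method body above.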


class AsyncTextToSoundEffectsClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def convert(
        self,
        *,
        text: str,
        duration_seconds: typing.Optional[float] = OMIT,
        prompt_influence: typing.Optional[float] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[bytes]:
        """
        Converts a text prompt into a sound effect.

        Parameters
        ----------
        text : str
            The text that will be converted into a sound effect.

        duration_seconds : typing.Optional[float]
            The duration of the generated sound, in seconds. Must be at least 0.5 and at most 22. If set to None, the optimal duration is inferred from the prompt. Defaults to None.

        prompt_influence : typing.Optional[float]
            A higher prompt influence makes the generation follow the prompt more closely, while also making it less variable. Must be a value between 0 and 1. Defaults to 0.3.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in options such as `chunk_size` to customize the request and response.

        Yields
        ------
        typing.AsyncIterator[bytes]
            Successful Response

        Examples
        --------
        import asyncio

        from neuralaudio import AsyncNeuralAudio

        client = AsyncNeuralAudio(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            async for chunk in client.text_to_sound_effects.convert(
                text="Spacious braam suitable for high-impact movie trailer moments",
            ):
                ...


        asyncio.run(main())
        """
        async with self._client_wrapper.httpx_client.stream(
            "v1/sound-generation",
            method="POST",
            json={
                "text": text,
                "duration_seconds": duration_seconds,
                "prompt_influence": prompt_influence,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:
            try:
                if 200 <= _response.status_code < 300:
                    # Stream the audio back to the caller; chunk_size may be overridden via request_options.
                    _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                    async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
                        yield _chunk
                    return
                # Non-2xx: read the full body so the error payload can be parsed.
                await _response.aread()
                if _response.status_code == 422:
                    raise UnprocessableEntityError(
                        typing.cast(
                            HttpValidationError,
                            construct_type(
                                type_=HttpValidationError,  # type: ignore
                                object_=_response.json(),
                            ),
                        )
                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
            raise ApiError(status_code=_response.status_code, body=_response_json)
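
# A minimal usage sketch (editorial, not part of the generated client): the async `convert`
# returns an async iterator, so it should be consumed with `async for` rather than awaited, e.g.:
#
#     import asyncio
#
#     from neuralaudio import AsyncNeuralAudio
#
#     async def save_effect() -> None:
#         client = AsyncNeuralAudio(api_key="YOUR_API_KEY")
#         with open("effect.mp3", "wb") as f:
#             async for chunk in client.text_to_sound_effects.convert(
#                 text="Glass shattering in a large hall",
#             ):
#                 f.write(chunk)
#
#     asyncio.run(save_effect())
#
# `save_effect` and "effect.mp3" are illustrative names, not part of this SDK.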