Commit 077c402

Author: Ammar Dodin (committed)
🔥 remove unused interfaces
1 parent 96e63c5 commit 077c402

File tree

1 file changed: +0 -161 lines changed


speech-to-text/v1-generated.ts

Lines changed: 0 additions & 161 deletions
@@ -1759,92 +1759,6 @@ namespace SpeechToTextV1 {
   /** Parameters for the `listModels` operation. **/
   export interface ListModelsParams {}
 
-  /** Parameters for the `recognizeSessionless` operation. **/
-  export interface RecognizeSessionlessParams {
-    /** Set to `chunked` to send the audio in streaming mode; the data does not need to exist fully before being streamed to the service. MULTIPART: You must also set this header for requests with more than one audio part. **/
-    transfer_encoding?: RecognizeSessionlessConstants.TransferEncoding | string;
-    /** The identifier of the model to be used for the recognition request. (Use `GET /v1/models` for a list of available models.). **/
-    model?: RecognizeSessionlessConstants.Model | string;
-    /** The GUID of a custom language model that is to be used with the request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. **/
-    customization_id?: string;
-    /** The GUID of a custom acoustic model that is to be used with the request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used. **/
-    acoustic_customization_id?: string;
-    /** If you specify a `customization_id` with the request, you can use the `customization_weight` parameter to tell the service how much weight to give to words from the custom language model compared to those from the base model for speech recognition. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. **/
-    customization_weight?: number;
-    /** NON-MULTIPART ONLY: Audio to transcribe in the format specified by the `Content-Type` header. **Required for a non-multipart request.**. **/
-    audio?: Blob;
-    /** The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, audio/webm;codecs=vorbis, or multipart/form-data. **/
-    content_type?: RecognizeSessionlessConstants.ContentType | string;
-    /** NON-MULTIPART ONLY: The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. **/
-    inactivity_timeout?: number;
-    /** NON-MULTIPART ONLY: Array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. Keywords are spotted only in the final hypothesis, not in interim results. Omit the parameter or specify an empty array if you do not need to spot keywords. **/
-    keywords?: string[];
-    /** NON-MULTIPART ONLY: Confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. **/
-    keywords_threshold?: number;
-    /** NON-MULTIPART ONLY: Maximum number of alternative transcripts to be returned. By default, a single transcription is returned. **/
-    max_alternatives?: number;
-    /** NON-MULTIPART ONLY: Confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No alternative words are computed if you omit the parameter. **/
-    word_alternatives_threshold?: number;
-    /** NON-MULTIPART ONLY: If `true`, confidence measure per word is returned. **/
-    word_confidence?: boolean;
-    /** NON-MULTIPART ONLY: If `true`, time alignment for each word is returned. **/
-    timestamps?: boolean;
-    /** NON-MULTIPART ONLY: If `true` (the default), filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. **/
-    profanity_filter?: boolean;
-    /** NON-MULTIPART ONLY: If `true`, converts dates, times, series of digits and numbers, phone numbers, currency values, and Internet addresses into more readable, conventional representations in the final transcript of a recognition request. If `false` (the default), no formatting is performed. Applies to US English transcription only. **/
-    smart_formatting?: boolean;
-    /** NON-MULTIPART ONLY: Indicates whether labels that identify which words were spoken by which participants in a multi-person exchange are to be included in the response. The default is `false`; no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the `GET /v1/models` method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). **/
-    speaker_labels?: boolean;
-    /** MULTIPART ONLY: Parameters for the multipart recognition request. This must be the first part of the request and must consist of JSON-formatted data. The information describes the subsequent parts of the request, which pass the audio files to be transcribed. **Required for a multipart request.**. **/
-    metadata?: string;
-    /** MULTIPART ONLY: One or more audio files for the request. For multiple audio files, set `Transfer-Encoding` to `chunked`. **Required for a multipart request.**. **/
-    upload?: ReadableStream | FileObject | Buffer;
-    /** The content type of upload. **/
-    upload_content_type?: string;
-  }
-
-  /** Constants for the `recognizeSessionless` operation. **/
-  export namespace RecognizeSessionlessConstants {
-    /** Set to `chunked` to send the audio in streaming mode; the data does not need to exist fully before being streamed to the service. MULTIPART: You must also set this header for requests with more than one audio part. **/
-    export enum TransferEncoding {
-      CHUNKED = 'chunked'
-    }
-    /** The identifier of the model to be used for the recognition request. (Use `GET /v1/models` for a list of available models.). **/
-    export enum Model {
-      AR_AR_BROADBANDMODEL = 'ar-AR_BroadbandModel',
-      EN_GB_BROADBANDMODEL = 'en-GB_BroadbandModel',
-      EN_GB_NARROWBANDMODEL = 'en-GB_NarrowbandModel',
-      EN_US_BROADBANDMODEL = 'en-US_BroadbandModel',
-      EN_US_NARROWBANDMODEL = 'en-US_NarrowbandModel',
-      ES_ES_BROADBANDMODEL = 'es-ES_BroadbandModel',
-      ES_ES_NARROWBANDMODEL = 'es-ES_NarrowbandModel',
-      FR_FR_BROADBANDMODEL = 'fr-FR_BroadbandModel',
-      JA_JP_BROADBANDMODEL = 'ja-JP_BroadbandModel',
-      JA_JP_NARROWBANDMODEL = 'ja-JP_NarrowbandModel',
-      PT_BR_BROADBANDMODEL = 'pt-BR_BroadbandModel',
-      PT_BR_NARROWBANDMODEL = 'pt-BR_NarrowbandModel',
-      ZH_CN_BROADBANDMODEL = 'zh-CN_BroadbandModel',
-      ZH_CN_NARROWBANDMODEL = 'zh-CN_NarrowbandModel'
-    }
-    /** The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, audio/webm;codecs=vorbis, or multipart/form-data. **/
-    export enum ContentType {
-      AUDIO_BASIC = 'audio/basic',
-      AUDIO_FLAC = 'audio/flac',
-      AUDIO_L16 = 'audio/l16',
-      AUDIO_MP3 = 'audio/mp3',
-      AUDIO_MPEG = 'audio/mpeg',
-      AUDIO_MULAW = 'audio/mulaw',
-      AUDIO_OGG = 'audio/ogg',
-      AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus',
-      AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis',
-      AUDIO_WAV = 'audio/wav',
-      AUDIO_WEBM = 'audio/webm',
-      AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus',
-      AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis',
-      MULTIPART_FORM_DATA = 'multipart/form-data'
-    }
-  }
-
   /** Parameters for the `createSession` operation. **/
   export interface CreateSessionParams {
     /** The identifier of the model to be used by the new session. (Use `GET /v1/models` or `GET /v1/models/{model_id}` for information about available models.). **/
@@ -1890,81 +1804,6 @@ namespace SpeechToTextV1 {
     session_id: string;
   }
 
-  /** Parameters for the `observeResult` operation. **/
-  export interface ObserveResultParams {
-    /** The ID of the session whose results you want to observe. **/
-    session_id: string;
-    /** The sequence ID of the recognition task whose results you want to observe. Omit the parameter to obtain results either for an ongoing recognition, if any, or for the next recognition task regardless of whether it specifies a sequence ID. **/
-    sequence_id?: number;
-    /** If `true`, interim results are returned as a stream of JSON `SpeechRecognitionResults` objects. If `false`, the response is a single `SpeechRecognitionResults` object with final results only. **/
-    interim_results?: boolean;
-  }
-
-  /** Parameters for the `recognizeSession` operation. **/
-  export interface RecognizeSessionParams {
-    /** The ID of the session for the recognition task. **/
-    session_id: string;
-    /** Set to `chunked` to send the audio in streaming mode; the data does not need to exist fully before being streamed to the service. MULTIPART: You must also set this header for requests with more than one audio part. **/
-    transfer_encoding?: RecognizeSessionConstants.TransferEncoding | string;
-    /** NON-MULTIPART ONLY: Audio to transcribe in the format specified by the `Content-Type` header. **Required for a non-multipart request.**. **/
-    audio?: Blob;
-    /** The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, audio/webm;codecs=vorbis, or multipart/form-data. **/
-    content_type?: RecognizeSessionConstants.ContentType | string;
-    /** NON-MULTIPART ONLY: Sequence ID of this recognition task in the form of a user-specified integer. If omitted, no sequence ID is associated with the recognition task. **/
-    sequence_id?: number;
-    /** NON-MULTIPART ONLY: The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error and with `session_closed` set to `true`. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. **/
-    inactivity_timeout?: number;
-    /** NON-MULTIPART ONLY: Array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. Keywords are spotted only in the final hypothesis, not in interim results (if supported by the method). Omit the parameter or specify an empty array if you do not need to spot keywords. **/
-    keywords?: string[];
-    /** NON-MULTIPART ONLY: Confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. **/
-    keywords_threshold?: number;
-    /** NON-MULTIPART ONLY: Maximum number of alternative transcripts to be returned. By default, a single transcription is returned. **/
-    max_alternatives?: number;
-    /** NON-MULTIPART ONLY: Confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No alternative words are computed if you omit the parameter. **/
-    word_alternatives_threshold?: number;
-    /** NON-MULTIPART ONLY: If `true`, confidence measure per word is returned. **/
-    word_confidence?: boolean;
-    /** NON-MULTIPART ONLY: If `true`, time alignment for each word is returned. **/
-    timestamps?: boolean;
-    /** NON-MULTIPART ONLY: If `true` (the default), filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. **/
-    profanity_filter?: boolean;
-    /** NON-MULTIPART ONLY: If `true`, converts dates, times, series of digits and numbers, phone numbers, currency values, and Internet addresses into more readable, conventional representations in the final transcript of a recognition request. If `false` (the default), no formatting is performed. Applies to US English transcription only. **/
-    smart_formatting?: boolean;
-    /** NON-MULTIPART ONLY: Indicates whether labels that identify which words were spoken by which participants in a multi-person exchange are to be included in the response. The default is `false`; no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the `GET /v1/models` method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). **/
-    speaker_labels?: boolean;
-    /** MULTIPART ONLY: Parameters for the multipart recognition request. This must be the first part of the request and must consist of JSON-formatted data. The information describes the subsequent parts of the request, which pass the audio files to be transcribed. **Required for a multipart request.**. **/
-    metadata?: string;
-    /** MULTIPART ONLY: One or more audio files for the request. For multiple audio files, set `Transfer-Encoding` to `chunked`. **Required for a multipart request.**. **/
-    upload?: ReadableStream | FileObject | Buffer;
-    /** The content type of upload. **/
-    upload_content_type?: string;
-  }
-
-  /** Constants for the `recognizeSession` operation. **/
-  export namespace RecognizeSessionConstants {
-    /** Set to `chunked` to send the audio in streaming mode; the data does not need to exist fully before being streamed to the service. MULTIPART: You must also set this header for requests with more than one audio part. **/
-    export enum TransferEncoding {
-      CHUNKED = 'chunked'
-    }
-    /** The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, audio/webm;codecs=vorbis, or multipart/form-data. **/
-    export enum ContentType {
-      AUDIO_BASIC = 'audio/basic',
-      AUDIO_FLAC = 'audio/flac',
-      AUDIO_L16 = 'audio/l16',
-      AUDIO_MP3 = 'audio/mp3',
-      AUDIO_MPEG = 'audio/mpeg',
-      AUDIO_MULAW = 'audio/mulaw',
-      AUDIO_OGG = 'audio/ogg',
-      AUDIO_OGG_CODECS_OPUS = 'audio/ogg;codecs=opus',
-      AUDIO_OGG_CODECS_VORBIS = 'audio/ogg;codecs=vorbis',
-      AUDIO_WAV = 'audio/wav',
-      AUDIO_WEBM = 'audio/webm',
-      AUDIO_WEBM_CODECS_OPUS = 'audio/webm;codecs=opus',
-      AUDIO_WEBM_CODECS_VORBIS = 'audio/webm;codecs=vorbis',
-      MULTIPART_FORM_DATA = 'multipart/form-data'
-    }
-  }
-
   /** Parameters for the `checkJob` operation. **/
   export interface CheckJobParams {
     /** The ID of the job whose status is to be checked. **/
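For context, the snippet below is a minimal sketch (not part of this commit) of how a caller might have populated the now-removed RecognizeSessionlessParams shape before this cleanup. The field names and enum values are taken verbatim from the deleted interface above; the speechToText.recognize(...) call and the use of a file stream for the audio payload are assumptions about the surrounding SDK usage, not something this diff shows.

import * as fs from 'fs';

// Illustrative only: an object matching the removed RecognizeSessionlessParams shape.
// The interface typed `audio` as Blob; in Node a readable stream was a common
// stand-in, so the cast below is an assumption rather than documented behavior.
const params = {
  model: 'en-US_BroadbandModel',         // a RecognizeSessionlessConstants.Model value
  content_type: 'audio/wav',             // a RecognizeSessionlessConstants.ContentType value
  audio: fs.createReadStream('audio.wav') as unknown as Blob,
  timestamps: true,                      // return per-word time alignment
  max_alternatives: 3,                   // up to three alternative transcripts
  inactivity_timeout: 30                 // close after 30 seconds of silence
};

// speechToText.recognize(params, (err, results) => { ... });  // hypothetical call shape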
