diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index acbe40086fa3..fbe1deaa18b5 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -27032,33 +27032,24 @@ importers: sdk/search/search-documents: dependencies: + '@azure-rest/core-client': + specifier: ^2.3.1 + version: link:../../core/core-client-rest '@azure/core-auth': specifier: ^1.9.0 version: link:../../core/core-auth - '@azure/core-client': - specifier: ^1.9.2 - version: link:../../core/core-client - '@azure/core-http-compat': - specifier: ^2.1.2 - version: link:../../core/core-http-compat - '@azure/core-paging': - specifier: ^1.6.2 - version: link:../../core/core-paging '@azure/core-rest-pipeline': - specifier: ^1.18.0 + specifier: ^1.20.0 version: link:../../core/core-rest-pipeline '@azure/core-tracing': - specifier: ^1.2.0 + specifier: ^1.0.0 version: link:../../core/core-tracing '@azure/core-util': - specifier: ^1.11.0 + specifier: ^1.12.0 version: link:../../core/core-util '@azure/logger': - specifier: ^1.1.4 + specifier: ^1.2.0 version: link:../../core/logger - events: - specifier: ^3.0.0 - version: 3.3.0 tslib: specifier: ^2.8.1 version: 2.8.1 @@ -27081,24 +27072,15 @@ importers: '@azure/identity': specifier: catalog:internal version: 4.11.1 - '@azure/openai': - specifier: 1.0.0-beta.12 - version: 1.0.0-beta.12 '@types/node': specifier: 'catalog:' version: 20.19.25 - '@vitest/browser-playwright': - specifier: catalog:testing - version: 4.0.9(msw@2.7.3(@types/node@20.19.25)(typescript@5.9.3))(playwright@1.56.1)(vite@7.2.2(@types/node@20.19.25)(terser@5.39.0)(tsx@4.20.6)(yaml@2.8.1))(vitest@4.0.9) '@vitest/coverage-istanbul': specifier: catalog:testing version: 4.0.9(vitest@4.0.9) cross-env: specifier: 'catalog:' version: 7.0.3 - dotenv: - specifier: catalog:testing - version: 16.6.1 eslint: specifier: 'catalog:' version: 9.39.1 @@ -27114,9 +27096,6 @@ importers: tshy: specifier: 'catalog:' version: 3.1.0 - type-plus: - specifier: ^7.6.2 - version: 7.6.2 typescript: specifier: 'catalog:' version: 5.9.3 @@ -33579,11 +33558,6 @@ packages: resolution: {integrity: sha512-dQrex2LiXwlCe9WuBHnCsY+xxLyuMXSd2SDEYJuhqB7cE8u6QafiC1xy8j8eBjGO30AsRi2M6amH0ZKk7vJpjA==} engines: {node: '>=16'} - '@azure/openai@1.0.0-beta.12': - resolution: {integrity: sha512-qKblxr6oVa8GsyNzY+/Ub9VmEsPYKhBrUrPaNEQiM+qrxnBPVm9kaeqGFFb/U78Q2zOabmhF9ctYt3xBW0nWnQ==} - engines: {node: '>=18.0.0'} - deprecated: 'The Azure OpenAI client library for JavaScript beta has been retired. Please migrate to the stable OpenAI SDK for JavaScript using the migration guide: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/MIGRATION.md.' 
- '@azure/opentelemetry-instrumentation-azure-sdk@1.0.0-beta.7': resolution: {integrity: sha512-boG33EDRcbw0Jo2cRgB6bccSirKOzYdYFMdcSsnOajLCLfJ8WIve3vxUMi7YZKxM8txZX/0cwzUU6crXmYxXZg==} engines: {node: '>=18.0.0'} @@ -37899,9 +37873,6 @@ packages: engines: {node: '>=10'} hasBin: true - tersify@3.12.1: - resolution: {integrity: sha512-VwzXGHZSOB4T27s4uvh9v8FYrNXyfVz0nBQi28TDwrZoQwT8ZJUp1W2Ff73ekN07stJSb0D+pr6iXeNeFqTI6Q==} - thenify-all@1.6.0: resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} engines: {node: '>=0.8'} @@ -38065,9 +38036,6 @@ packages: resolution: {integrity: sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==} engines: {node: '>= 0.6'} - type-plus@7.6.2: - resolution: {integrity: sha512-qUlXv9Y0/W56pg38m275IMD3WA03QbVoqNY16S3kmwtuA4gOT2iheyUdOp8NWrmXWpf7om98hzr7AZD/eW2jLA==} - typescript-eslint@8.46.4: resolution: {integrity: sha512-KALyxkpYV5Ix7UhvjTwJXZv76VWsHG+NjNlt/z+a17SOQSiOcBdUXdbJdyXi7RPxrBFECtFOiPwUJQusJuCqrg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -38142,10 +38110,6 @@ packages: resolution: {integrity: sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==} engines: {node: '>= 10.0.0'} - unpartial@1.0.5: - resolution: {integrity: sha512-yAqaXcachjgZUnM2yIkf+4KJhmyuoj7stBvlnlZpB15OYVbKnLhgJfmLW7qkpzLHCdsm1bEFvhyN9hCmlZ3uuw==} - engines: {node: '>=6'} - unpipe@1.0.0: resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} engines: {node: '>= 0.8'} @@ -38962,18 +38926,6 @@ snapshots: jsonwebtoken: 9.0.2 uuid: 8.3.2 - '@azure/openai@1.0.0-beta.12': - dependencies: - '@azure-rest/core-client': 1.4.0 - '@azure/core-auth': 1.10.1 - '@azure/core-rest-pipeline': 1.22.2 - '@azure/core-sse': 2.3.0 - '@azure/core-util': 1.13.1 - '@azure/logger': 1.3.0 - tslib: 2.8.1 - transitivePeerDependencies: - - supports-color - '@azure/opentelemetry-instrumentation-azure-sdk@1.0.0-beta.7': dependencies: '@azure/core-tracing': 1.3.1 @@ -43896,12 +43848,6 @@ snapshots: source-map-support: 0.5.21 optional: true - tersify@3.12.1: - dependencies: - acorn: 8.15.0 - is-buffer: 2.0.5 - unpartial: 1.0.5 - thenify-all@1.6.0: dependencies: thenify: 3.3.1 @@ -44060,11 +44006,6 @@ snapshots: media-typer: 1.1.0 mime-types: 3.0.1 - type-plus@7.6.2: - dependencies: - tersify: 3.12.1 - unpartial: 1.0.5 - typescript-eslint@8.46.4(eslint@9.39.1)(typescript@5.8.3): dependencies: '@typescript-eslint/eslint-plugin': 8.46.4(@typescript-eslint/parser@8.46.4(eslint@9.39.1)(typescript@5.8.3))(eslint@9.39.1)(typescript@5.8.3) @@ -44122,8 +44063,6 @@ snapshots: universalify@2.0.1: {} - unpartial@1.0.5: {} - unpipe@1.0.0: {} untildify@4.0.0: {} diff --git a/sdk/search/search-documents/LICENSE b/sdk/search/search-documents/LICENSE index b2f52a2bad4e..63447fd8bbbf 100644 --- a/sdk/search/search-documents/LICENSE +++ b/sdk/search/search-documents/LICENSE @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +SOFTWARE. 
\ No newline at end of file diff --git a/sdk/search/search-documents/README.md b/sdk/search/search-documents/README.md index bcab593938e6..f84983531af4 100644 --- a/sdk/search/search-documents/README.md +++ b/sdk/search/search-documents/README.md @@ -1,478 +1,92 @@ -# Azure AI Search client library for JavaScript - -[Azure AI Search](https://learn.microsoft.com/azure/search/) (formerly known as "Azure Cognitive Search") is an AI-powered information retrieval platform that helps developers build rich search experiences and generative AI apps that combine large language models with enterprise data. - -The Azure AI Search service is well suited for the following application scenarios: - -- Consolidate varied content types into a single searchable index. - To populate an index, you can push JSON documents that contain your content, - or if your data is already in Azure, create an indexer to pull in data - automatically. -- Attach skillsets to an indexer to create searchable content from images - and unstructured documents. A skillset leverages APIs from Azure AI Services - for built-in OCR, entity recognition, key phrase extraction, language - detection, text translation, and sentiment analysis. You can also add - custom skills to integrate external processing of your content during - data ingestion. -- In a search client application, implement query logic and user experiences - similar to commercial web search engines and chat-style apps. - -Use the @azure/search-documents client library to: - -- Submit queries using vector, keyword, and hybrid query forms. -- Implement filtered queries for metadata, geospatial search, faceted navigation, - or to narrow results based on filter criteria. -- Create and manage search indexes. -- Upload and update documents in the search index. -- Create and manage indexers that pull data from Azure into an index. -- Create and manage skillsets that add AI enrichment to data ingestion. -- Create and manage analyzers for advanced text analysis or multi-lingual content. -- Optimize results through semantic ranking and scoring profiles to factor in business logic or freshness. +# Azure KnowledgeBaseRetrieval client library for JavaScript + +This package contains an isomorphic SDK (runs both in Node.js and in browsers) for the Azure KnowledgeBaseRetrieval client. + +A client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. Key links: -- [Source code](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/search/search-documents/) +- [Source code](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/search/search-documents) - [Package (NPM)](https://www.npmjs.com/package/@azure/search-documents) -- [API reference documentation](https://learn.microsoft.com/javascript/api/@azure/search-documents) -- [REST API documentation](https://learn.microsoft.com/rest/api/searchservice/) -- [Product documentation](https://learn.microsoft.com/azure/search/) -- [Samples](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/search/search-documents/samples) +- [API reference documentation](https://learn.microsoft.com/javascript/api/@azure/search-documents?view=azure-node-preview) ## Getting started -### Install the `@azure/search-documents` package - -```bash -npm install @azure/search-documents -``` - ### Currently supported environments - [LTS versions of Node.js](https://github.com/nodejs/release#release-schedule) - Latest versions of Safari, Chrome, Microsoft Edge, and Firefox.
+- Latest versions of Safari, Chrome, Edge and Firefox. See our [support policy](https://github.com/Azure/azure-sdk-for-js/blob/main/SUPPORT.md) for more details. ### Prerequisites -- An [Azure subscription](https://azure.microsoft.com/free/) -- A [Search service][create_search_service_docs] - -To create a new search service, you can use the [Azure portal][create_search_service_docs], [Azure PowerShell][create_search_service_ps], or the [Azure CLI][create_search_service_cli]. Here's an example using the Azure CLI to create a free instance for getting started: - -```Powershell -az search service create --name --resource-group --sku free --location westus -``` - -See [choosing a pricing tier](https://learn.microsoft.com/azure/search/search-sku-tier) for more information about available options. - -### Authenticate the client - -To interact with the search service, you'll need to create an instance of the appropriate client class: `SearchClient` for searching indexed documents, `SearchIndexClient` for managing indexes, or `SearchIndexerClient` for crawling data sources and loading search documents into an index. To instantiate a client object, you'll need an **endpoint** and **Azure roles** or an **API key**. You can refer to the documentation for more information on [supported authenticating approaches](https://learn.microsoft.com/azure/search/search-security-overview#authentication) with the search service. - -#### Get an API Key - -An API key can be an easier approach to start with because it doesn't require pre-existing role assignments. - -You can get the **endpoint** and an **API key** from the search service in the [Azure portal](https://portal.azure.com/). Please refer the [documentation](https://learn.microsoft.com/azure/search/search-security-api-keys) for instructions on how to get an API key. - -Alternatively, you can use the following [Azure CLI](https://learn.microsoft.com/cli/azure/) command to retrieve the API key from the search service: - -```Powershell -az search admin-key show --resource-group --service-name -``` - -There are two types of keys used to access your search service: **admin** _(read-write)_ and **query** _(read-only)_ keys. Restricting access and operations in client apps is essential to safeguarding the search assets on your service. Always use a query key rather than an admin key for any query originating from a client app. - -_Note: The example Azure CLI snippet above retrieves an admin key so it's easier to get started exploring APIs, but it should be managed carefully._ - -Once you have an api-key, you can use it as follows: - -```ts snippet:ReadmeSampleCreateClient_APIKey -import { - SearchClient, - AzureKeyCredential, - SearchIndexClient, - SearchIndexerClient, -} from "@azure/search-documents"; - -// To query and manipulate documents -const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), -); - -// To manage indexes and synonymmaps -const indexClient = new SearchIndexClient("", new AzureKeyCredential("")); - -// To manage indexers, datasources and skillsets -const indexerClient = new SearchIndexerClient("", new AzureKeyCredential("")); -``` +- An [Azure subscription][azure_sub]. 
-### Authenticate in a National Cloud - -To authenticate in a [National Cloud](https://learn.microsoft.com/azure/active-directory/develop/authentication-national-cloud), you will need to make the following additions to your client configuration: - -- Set the `Audience` in `SearchClientOptions` - -```ts snippet:ReadmeSampleCreateClient_NationalCloud -import { - SearchClient, - AzureKeyCredential, - KnownSearchAudience, - SearchIndexClient, - SearchIndexerClient, -} from "@azure/search-documents"; - -// To query and manipulate documents -const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - { - audience: KnownSearchAudience.AzureChina, - }, -); - -// To manage indexes and synonymmaps -const indexClient = new SearchIndexClient("", new AzureKeyCredential(""), { - audience: KnownSearchAudience.AzureChina, -}); - -// To manage indexers, datasources and skillsets -const indexerClient = new SearchIndexerClient("", new AzureKeyCredential(""), { - audience: KnownSearchAudience.AzureChina, -}); -``` +### Install the `@azure/search-documents` package -## Key concepts +Install the Azure KnowledgeBaseRetrieval client library for JavaScript with `npm`: -An Azure AI Search service contains one or more indexes that provide persistent storage of searchable data in the form of JSON documents. _(If you're brand new to search, you can make a very rough analogy between indexes and database tables.)_ The @azure/search-documents client library -exposes operations on these resources through three main client types. - -- `SearchClient` helps with: - - - [Searching](https://learn.microsoft.com/azure/search/search-lucene-query-architecture) - your indexed documents using [vector queries](https://learn.microsoft.com/azure/search/vector-search-how-to-query), - [keyword queries](https://learn.microsoft.com/azure/search/search-query-create) - and [hybrid queries](https://learn.microsoft.com/azure/search/hybrid-search-how-to-query) - - [Vector query filters](https://learn.microsoft.com/azure/search/vector-search-filters) and [Text query filters](https://learn.microsoft.com/azure/search/search-filters) - - [Semantic ranking](https://learn.microsoft.com/azure/search/semantic-how-to-query-request) and [scoring profiles](https://learn.microsoft.com/azure/search/index-add-scoring-profiles) for boosting relevance - - [Autocompleting](https://learn.microsoft.com/rest/api/searchservice/autocomplete) partially typed search terms based on documents in the index - - [Suggesting](https://learn.microsoft.com/rest/api/searchservice/suggestions) the most likely matching text in documents as a user types - - [Adding, Updating or Deleting Documents](https://learn.microsoft.com/rest/api/searchservice/addupdate-or-delete-documents) documents from an index - -- `SearchIndexClient` allows you to: - - - [Create, delete, update, or configure a search index](https://learn.microsoft.com/rest/api/searchservice/index-operations) - - [Declare custom synonym maps to expand or rewrite queries](https://learn.microsoft.com/rest/api/searchservice/synonym-map-operations) - -- `SearchIndexerClient` allows you to: - - [Start indexers to automatically crawl data sources](https://learn.microsoft.com/rest/api/searchservice/indexer-operations) - - [Define AI powered Skillsets to transform and enrich your data](https://learn.microsoft.com/rest/api/searchservice/skillset-operations) - -**Note**: These clients cannot function in the browser because the APIs it calls do not have support for Cross-Origin Resource Sharing (CORS). 
- -## TypeScript/JavaScript specific concepts - -### Documents - -An item stored inside a search index. The shape of this document is described in the index using the `fields` property. Each `SearchField` has a name, a datatype, and additional metadata such as if it is searchable or filterable. - -### Pagination - -Typically you will only wish to [show a subset of search results](https://learn.microsoft.com/azure/search/search-pagination-page-layout#total-hits-and-page-counts) to a user at one time. To support this, you can use the `top`, `skip` and `includeTotalCount` parameters to provide a paged experience on top of search results. - -### Document field encoding - -[Supported data types](https://learn.microsoft.com/rest/api/searchservice/Supported-data-types) in an index are mapped to JSON types in API requests/responses. The JS client library keeps these mostly the same, with some exceptions: - -- `Edm.DateTimeOffset` is converted to a JS `Date`. -- `Edm.GeographyPoint` is converted to a `GeographyPoint` type exported by the client library. -- Special values of the `number` type (NaN, Infinity, -Infinity) are serialized as strings in the REST API, but are converted back to `number` by the client library. - -**Note**: Data types are converted based on value, not the field type in the index schema. This means that if you have an ISO8601 Date string (e.g. "2020-03-06T18:48:27.896Z") as the value of a field, it will be converted to a Date regardless of how you stored it in your schema. - -## Examples - -The following examples demonstrate the basics - please [check out our samples](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/search/search-documents/samples) for much more. - -- [Creating an index](#create-an-index) -- [Retrieving a specific document from your index](#retrieve-a-specific-document-from-an-index) -- [Adding documents to your index](#adding-documents-into-an-index) -- [Perform a search on documents](#perform-a-search-on-documents) - - [Querying with TypeScript](#querying-with-typescript) - - [Querying with OData filters](#querying-with-odata-filters) - - [Querying with facets](#querying-with-facets) - -### Create an Index - -```ts snippet:ReadmeSampleCreateIndex -import { SearchIndexClient, AzureKeyCredential } from "@azure/search-documents"; - -const indexClient = new SearchIndexClient("", new AzureKeyCredential("")); - -const result = await indexClient.createIndex({ - name: "example-index", - fields: [ - { - type: "Edm.String", - name: "id", - key: true, - }, - { - type: "Edm.Double", - name: "awesomenessLevel", - sortable: true, - filterable: true, - facetable: true, - }, - { - type: "Edm.String", - name: "description", - searchable: true, - }, - { - type: "Edm.ComplexType", - name: "details", - fields: [ - { - type: "Collection(Edm.String)", - name: "tags", - searchable: true, - }, - ], - }, - { - type: "Edm.Int32", - name: "hiddenWeight", - hidden: true, - }, - ], -}); - -console.log(`Index created with name ${result.name}`); +```bash +npm install @azure/search-documents ``` -### Retrieve a specific document from an index +### Create and authenticate a `KnowledgeBaseRetrievalClient` -A specific document can be retrieved by its primary key value: +To create a client object to access the Azure KnowledgeBaseRetrieval API, you will need the `endpoint` of your Azure KnowledgeBaseRetrieval resource and a `credential`. The Azure KnowledgeBaseRetrieval client can use Azure Active Directory credentials to authenticate. 
+You can find the endpoint for your Azure KnowledgeBaseRetrieval resource in the [Azure Portal][azure_portal]. -```ts snippet:ReadmeSampleGetDocument -import { SearchClient, AzureKeyCredential } from "@azure/search-documents"; +You can authenticate with Azure Active Directory using a credential from the [@azure/identity][azure_identity] library or [an existing AAD token](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/identity/identity/samples/AzureIdentityExamples.md#authenticating-with-a-pre-fetched-access-token). -const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), -); +To use the [DefaultAzureCredential][defaultazurecredential] provider shown below, or other credential providers provided with the Azure SDK, please install the `@azure/identity` package: -const result = await searchClient.getDocument("1234"); +```bash +npm install @azure/identity ``` -### Adding documents into an index +You will also need to **register a new AAD application and grant access to Azure KnowledgeBaseRetrieval** by assigning a suitable role to your service principal (note: roles such as `"Owner"` will not grant the necessary permissions). -You can upload multiple documents into index inside a batch: +For more information about how to create an Azure AD application, check out [this guide](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal). -```ts snippet:ReadmeSampleUploadDocuments -import { SearchClient, AzureKeyCredential } from "@azure/search-documents"; +In Node.js and Node-like environments, you can use the `DefaultAzureCredential` class to authenticate the client. -const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), -); +```ts +import { KnowledgeBaseRetrievalClient } from "@azure/search-documents"; +import { DefaultAzureCredential } from "@azure/identity"; -const uploadResult = await searchClient.uploadDocuments([ - // JSON objects matching the shape of the client's index - {}, - {}, - {}, -]); -for (const result of uploadResult.results) { - console.log(`Uploaded ${result.key}; succeeded? ${result.succeeded}`); -} +const client = new KnowledgeBaseRetrievalClient("", new DefaultAzureCredential()); ``` -### Perform a search on documents - -To list all results of a particular query, you can use `search` with a search string that uses [simple query syntax](https://learn.microsoft.com/azure/search/query-simple-syntax): +For browser environments, use the `InteractiveBrowserCredential` from the `@azure/identity` package to authenticate.
-```ts snippet:ReadmeSampleSearch +```ts import { SearchClient, AzureKeyCredential } from "@azure/search-documents"; +import { InteractiveBrowserCredential } from "@azure/identity"; +import { KnowledgeBaseRetrievalClient } from "@azure/search-documents"; -const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), -); - -const searchResults = await searchClient.search("wifi -luxury"); -for await (const result of searchResults.results) { - console.log(result); -} +const credential = new InteractiveBrowserCredential({ + tenantId: "", + clientId: "" +}); +const client = new KnowledgeBaseRetrievalClient("", credential); ``` -For a more advanced search that uses [Lucene syntax](https://learn.microsoft.com/azure/search/query-lucene-syntax), specify `queryType` to be `full`: - -```ts snippet:ReadmeSampleSearchLucene -import { SearchClient, AzureKeyCredential } from "@azure/search-documents"; -const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), -); - -const searchResults = await searchClient.search('Category:budget AND "recently renovated"^3', { - queryType: "full", - searchMode: "all", -}); -for await (const result of searchResults.results) { - console.log(result); -} -``` ### JavaScript Bundle +To use this client library in the browser, you first need to use a bundler. For details on how to do this, please refer to our [bundling documentation](https://aka.ms/AzureSDKBundling).
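+
+As an illustration only (the choice of esbuild and the `src/index.ts` entry point are assumptions, not requirements of this package; any bundler covered by the guide above works), a minimal bundling command could look like this:
+
+```bash
+# Bundle your app, which imports @azure/search-documents, into a single browser-ready file.
+npx esbuild src/index.ts --bundle --outfile=dist/main.js
+```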
- console.log(result.document.hotelName); -} -``` - -#### Querying with OData filters - -Using the `filter` query parameter allows you to query an index using the syntax of an [OData \$filter expression](https://learn.microsoft.com/azure/search/search-query-odata-filter). - -```ts snippet:ReadmeSampleSearchWithOData -import { SearchClient, AzureKeyCredential, odata } from "@azure/search-documents"; - -const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), -); - -const baseRateMax = 200; -const ratingMin = 4; -const searchResults = await searchClient.search("WiFi", { - filter: odata`Rooms/any(room: room/BaseRate lt ${baseRateMax}) and Rating ge ${ratingMin}`, - orderBy: ["Rating desc"], - select: ["hotelId", "hotelName", "Rating"], -}); -for await (const result of searchResults.results) { - // Each result will have "HotelId", "HotelName", and "Rating" - // in addition to the standard search result property "score" - console.log(result); -} -``` - -#### Querying with vectors - -Text embeddings can be queried using the `vector` search parameter. See [Query vectors](https://learn.microsoft.com/azure/search/vector-search-how-to-query) and [Filter vector queries](https://learn.microsoft.com/azure/search/vector-search-filters) for more information. - -```ts snippet:ReadmeSampleSearchWithVector -import { SearchClient, AzureKeyCredential } from "@azure/search-documents"; - -const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), -); - -const queryVector: number[] = [ - // Embedding of the query "What are the most luxurious hotels?" -]; -const searchResults = await searchClient.search("*", { - vectorSearchOptions: { - queries: [ - { - kind: "vector", - vector: queryVector, - fields: ["descriptionVector"], - kNearestNeighborsCount: 3, - }, - ], - }, -}); -for await (const result of searchResults.results) { - // These results are the nearest neighbors to the query vector - console.log(result); -} -``` - -#### Querying with facets - -[Facets](https://learn.microsoft.com/azure/search/search-filters-facets) are used to help a user of your application refine a search along pre-configured dimensions. [Facet syntax](https://learn.microsoft.com/rest/api/searchservice/search-documents#facetstring-zero-or-more) provides the options to sort and bucket facet values. - -```ts snippet:ReadmeSampleSearchWithFacets -import { SearchClient, AzureKeyCredential } from "@azure/search-documents"; +## Key concepts -const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), -); +### KnowledgeBaseRetrievalClient -const searchResults = await searchClient.search("WiFi", { - facets: ["category,count:3,sort:count", "rooms/baseRate,interval:100"], -}); -console.log(searchResults.facets); -``` - -When retrieving results, a `facets` property will be available that will indicate the number of results that fall into each facet bucket. This can be used to drive refinement (e.g. issuing a follow-up search that filters on the `Rating` being greater than or equal to 3 and less than 4.) +`KnowledgeBaseRetrievalClient` is the primary interface for developers using the Azure KnowledgeBaseRetrieval client library. Explore the methods on this client object to understand the different features of the Azure KnowledgeBaseRetrieval service that you can access. ## Troubleshooting ### Logging -Enabling logging can help uncover useful information about failures. In order to see a log of HTTP requests and responses, set the `AZURE_LOG_LEVEL` environment variable to `info`. 
Alternatively, logging can be enabled at runtime by calling `setLogLevel` in the `@azure/logger`: +Enabling logging may help uncover useful information about failures. In order to see a log of HTTP requests and responses, set the `AZURE_LOG_LEVEL` environment variable to `info`. Alternatively, logging can be enabled at runtime by calling `setLogLevel` in the `@azure/logger`: -```ts snippet:SetLogLevel +```ts import { setLogLevel } from "@azure/logger"; setLogLevel("info"); @@ -480,34 +94,16 @@ setLogLevel("info"); For more detailed instructions on how to enable logs, you can look at the [@azure/logger package docs](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/core/logger). -## Next steps - -- [Go further with search-documents and our samples](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/search/search-documents/samples) -- [Read more about the Azure AI Search service](https://learn.microsoft.com/azure/search/search-what-is-azure-search) ## Contributing If you'd like to contribute to this library, please read the [contributing guide](https://github.com/Azure/azure-sdk-for-js/blob/main/CONTRIBUTING.md) to learn more about how to build and test the code. -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit [cla.microsoft.com][cla]. - -This project has adopted the [Microsoft Open Source Code of Conduct][coc]. For more information see the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments. - ## Related projects - [Microsoft Azure SDK for JavaScript](https://github.com/Azure/azure-sdk-for-js) - - -[azure_cli]: https://learn.microsoft.com/cli/azure [azure_sub]: https://azure.microsoft.com/free/ -[search_resource]: https://learn.microsoft.com/azure/search/search-create-service-portal [azure_portal]: https://portal.azure.com -[cognitive_auth]: https://learn.microsoft.com/azure/cognitive-services/authentication -[create_search_service_docs]: https://learn.microsoft.com/azure/search/search-create-service-portal -[create_search_service_ps]: https://learn.microsoft.com/azure/search/search-manage-powershell#create-or-delete-a-service -[create_search_service_cli]: https://learn.microsoft.com/cli/azure/search/service?view=azure-cli-latest#az-search-service-create -[cla]: https://cla.microsoft.com -[coc]: https://opensource.microsoft.com/codeofconduct/ -[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ -[coc_contact]: mailto:opencode@microsoft.com +[azure_identity]: https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/identity/identity +[defaultazurecredential]: https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/identity/identity#defaultazurecredential diff --git a/sdk/search/search-documents/api-extractor.json b/sdk/search/search-documents/api-extractor.json index 16d81e2eb512..870d6d399477 100644 --- a/sdk/search/search-documents/api-extractor.json +++ b/sdk/search/search-documents/api-extractor.json @@ -1,3 +1 @@ -{ - "extends": "../../../api-extractor-base.json" -} +{ "extends": "../../../api-extractor-base.json" } diff --git a/sdk/search/search-documents/eslint.config.mjs b/sdk/search/search-documents/eslint.config.mjs index 624f7788769c..6d2f8a5914c3 100644 --- a/sdk/search/search-documents/eslint.config.mjs +++ b/sdk/search/search-documents/eslint.config.mjs @@ -1,3 +1,14 @@ import 
azsdkEslint from "@azure/eslint-plugin-azure-sdk"; -export default azsdkEslint.config([]); +export default azsdkEslint.config([ + { + rules: { + "@azure/azure-sdk/ts-modules-only-named": "warn", + "@azure/azure-sdk/ts-package-json-types": "warn", + "@azure/azure-sdk/ts-package-json-engine-is-present": "warn", + "@azure/azure-sdk/ts-package-json-files-required": "off", + "@azure/azure-sdk/ts-package-json-main-is-cjs": "off", + "tsdoc/syntax": "warn", + }, + }, +]); diff --git a/sdk/search/search-documents/generated/index.ts b/sdk/search/search-documents/generated/index.ts new file mode 100644 index 000000000000..be6edf73b957 --- /dev/null +++ b/sdk/search/search-documents/generated/index.ts @@ -0,0 +1,612 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { + PageSettings, + ContinuablePage, + PagedAsyncIterableIterator, +} from "./static-helpers/pagingHelpers.js"; + +export { SearchClient } from "./search/searchClient.js"; +export { + IndexedSharePointKnowledgeSource, + IndexedSharePointKnowledgeSourceParameters, + KnowledgeSourceIngestionParameters, + AIServices, + KnownKnowledgeSourceIngestionPermissionOption, + KnowledgeSourceIngestionPermissionOption, + KnownKnowledgeSourceContentExtractionMode, + KnowledgeSourceContentExtractionMode, + IndexedOneLakeKnowledgeSource, + IndexedOneLakeKnowledgeSourceParameters, + WebKnowledgeSource, + WebKnowledgeSourceParameters, + WebKnowledgeSourceDomains, + WebKnowledgeSourceDomain, + RemoteSharePointKnowledgeSource, + RemoteSharePointKnowledgeSourceParameters, + ServiceIndexersRuntime, + IndexerRuntime, + KnownKnowledgeBaseActivityRecordType, + KnowledgeBaseActivityRecordType, + KnownKnowledgeBaseReferenceType, + KnowledgeBaseReferenceType, + KnowledgeSourceStatus, + KnownKnowledgeSourceSynchronizationStatus, + KnowledgeSourceSynchronizationStatus, + SynchronizationState, + CompletedSynchronizationState, + KnowledgeSourceStatistics, + KnownVersions, +} from "./models/index.js"; +export { + ErrorResponse, + ErrorDetail, + ErrorAdditionalInfo, + SearchDocumentsResult, + FacetResult, + QueryAnswerResult, + DebugInfo, + QueryRewritesDebugInfo, + QueryRewritesValuesDebugInfo, + SearchRequest, + KnownQueryType, + QueryType, + KnownScoringStatistics, + ScoringStatistics, + KnownQueryDebugMode, + QueryDebugMode, + KnownSearchMode, + SearchMode, + KnownQueryLanguage, + QueryLanguage, + KnownQuerySpellerType, + QuerySpellerType, + KnownSemanticErrorMode, + SemanticErrorMode, + KnownQueryAnswerType, + QueryAnswerType, + KnownQueryCaptionType, + QueryCaptionType, + KnownQueryRewritesType, + QueryRewritesType, + VectorQuery, + VectorQueryUnion, + VectorThreshold, + VectorThresholdUnion, + KnownVectorThresholdKind, + VectorThresholdKind, + VectorSimilarityThreshold, + SearchScoreThreshold, + KnownVectorQueryKind, + VectorQueryKind, + VectorizedQuery, + VectorizableTextQuery, + VectorizableImageUrlQuery, + VectorizableImageBinaryQuery, + KnownVectorFilterMode, + VectorFilterMode, + HybridSearch, + KnownHybridCountAndFacetMode, + HybridCountAndFacetMode, + SearchResult, + QueryCaptionResult, + DocumentDebugInfo, + SemanticDebugInfo, + QueryResultDocumentSemanticField, + KnownSemanticFieldState, + SemanticFieldState, + QueryResultDocumentRerankerInput, + VectorsDebugInfo, + QueryResultDocumentSubscores, + TextResult, + SingleVectorFieldResult, + QueryResultDocumentInnerHit, + KnownSemanticErrorReason, + SemanticErrorReason, + KnownSemanticSearchResultsType, + SemanticSearchResultsType, + KnownSemanticQueryRewritesResultType, + 
SemanticQueryRewritesResultType, + LookupDocument, + SuggestDocumentsResult, + SuggestResult, + IndexDocumentsBatch, + IndexAction, + KnownIndexActionType, + IndexActionType, + IndexDocumentsResult, + IndexingResult, + AutocompleteResult, + AutocompleteItem, + KnownAutocompleteMode, + AutocompleteMode, +} from "./models/azure/search/documents/index.js"; +export { + SynonymMap, + SearchResourceEncryptionKey, + AzureActiveDirectoryApplicationCredentials, + SearchIndexerDataIdentity, + SearchIndexerDataIdentityUnion, + SearchIndexerDataNoneIdentity, + SearchIndexerDataUserAssignedIdentity, + ListSynonymMapsResult, + SearchIndex, + SearchField, + KnownSearchFieldDataType, + SearchFieldDataType, + KnownPermissionFilter, + PermissionFilter, + KnownLexicalAnalyzerName, + LexicalAnalyzerName, + KnownLexicalNormalizerName, + LexicalNormalizerName, + KnownVectorEncodingFormat, + VectorEncodingFormat, + ScoringProfile, + TextWeights, + ScoringFunction, + ScoringFunctionUnion, + KnownScoringFunctionInterpolation, + ScoringFunctionInterpolation, + DistanceScoringFunction, + DistanceScoringParameters, + FreshnessScoringFunction, + FreshnessScoringParameters, + MagnitudeScoringFunction, + MagnitudeScoringParameters, + TagScoringFunction, + TagScoringParameters, + KnownScoringFunctionAggregation, + ScoringFunctionAggregation, + CorsOptions, + SearchSuggester, + LexicalAnalyzer, + LexicalAnalyzerUnion, + CustomAnalyzer, + KnownLexicalTokenizerName, + LexicalTokenizerName, + KnownTokenFilterName, + TokenFilterName, + KnownCharFilterName, + CharFilterName, + PatternAnalyzer, + KnownRegexFlags, + RegexFlags, + LuceneStandardAnalyzer, + StopAnalyzer, + LexicalTokenizer, + LexicalTokenizerUnion, + ClassicTokenizer, + EdgeNGramTokenizer, + KnownTokenCharacterKind, + TokenCharacterKind, + KeywordTokenizer, + MicrosoftLanguageTokenizer, + KnownMicrosoftTokenizerLanguage, + MicrosoftTokenizerLanguage, + MicrosoftLanguageStemmingTokenizer, + KnownMicrosoftStemmingTokenizerLanguage, + MicrosoftStemmingTokenizerLanguage, + NGramTokenizer, + PathHierarchyTokenizer, + PatternTokenizer, + LuceneStandardTokenizer, + UaxUrlEmailTokenizer, + TokenFilter, + TokenFilterUnion, + AsciiFoldingTokenFilter, + CjkBigramTokenFilter, + KnownCjkBigramTokenFilterScripts, + CjkBigramTokenFilterScripts, + CommonGramTokenFilter, + DictionaryDecompounderTokenFilter, + EdgeNGramTokenFilter, + KnownEdgeNGramTokenFilterSide, + EdgeNGramTokenFilterSide, + ElisionTokenFilter, + KeepTokenFilter, + KeywordMarkerTokenFilter, + LengthTokenFilter, + LimitTokenFilter, + NGramTokenFilter, + PatternCaptureTokenFilter, + PatternReplaceTokenFilter, + PhoneticTokenFilter, + KnownPhoneticEncoder, + PhoneticEncoder, + ShingleTokenFilter, + SnowballTokenFilter, + KnownSnowballTokenFilterLanguage, + SnowballTokenFilterLanguage, + StemmerTokenFilter, + KnownStemmerTokenFilterLanguage, + StemmerTokenFilterLanguage, + StemmerOverrideTokenFilter, + StopwordsTokenFilter, + KnownStopwordsList, + StopwordsList, + SynonymTokenFilter, + TruncateTokenFilter, + UniqueTokenFilter, + WordDelimiterTokenFilter, + CharFilter, + CharFilterUnion, + MappingCharFilter, + PatternReplaceCharFilter, + LexicalNormalizer, + LexicalNormalizerUnion, + CustomNormalizer, + SimilarityAlgorithm, + SimilarityAlgorithmUnion, + ClassicSimilarity, + BM25Similarity, + SemanticSearch, + SemanticConfiguration, + SemanticPrioritizedFields, + SemanticField, + KnownRankingOrder, + RankingOrder, + VectorSearch, + VectorSearchProfile, + VectorSearchAlgorithmConfiguration, + 
VectorSearchAlgorithmConfigurationUnion, + KnownVectorSearchAlgorithmKind, + VectorSearchAlgorithmKind, + HnswAlgorithmConfiguration, + HnswParameters, + KnownVectorSearchAlgorithmMetric, + VectorSearchAlgorithmMetric, + ExhaustiveKnnAlgorithmConfiguration, + ExhaustiveKnnParameters, + VectorSearchVectorizer, + VectorSearchVectorizerUnion, + KnownVectorSearchVectorizerKind, + VectorSearchVectorizerKind, + AzureOpenAIVectorizer, + AzureOpenAIVectorizerParameters, + KnownAzureOpenAIModelName, + AzureOpenAIModelName, + WebApiVectorizer, + WebApiVectorizerParameters, + AIServicesVisionVectorizer, + AIServicesVisionParameters, + AzureMachineLearningVectorizer, + AzureMachineLearningParameters, + KnownAIFoundryModelCatalogName, + AIFoundryModelCatalogName, + VectorSearchCompression, + VectorSearchCompressionUnion, + RescoringOptions, + KnownVectorSearchCompressionRescoreStorageMethod, + VectorSearchCompressionRescoreStorageMethod, + KnownVectorSearchCompressionKind, + VectorSearchCompressionKind, + ScalarQuantizationCompression, + ScalarQuantizationParameters, + KnownVectorSearchCompressionTarget, + VectorSearchCompressionTarget, + BinaryQuantizationCompression, + KnownSearchIndexPermissionFilterOption, + SearchIndexPermissionFilterOption, + GetIndexStatisticsResult, + AnalyzeTextOptions, + AnalyzeResult, + AnalyzedTokenInfo, + SearchAlias, + KnowledgeBase, + KnowledgeSourceReference, + KnowledgeBaseModel, + KnowledgeBaseModelUnion, + KnownKnowledgeBaseModelKind, + KnowledgeBaseModelKind, + KnowledgeBaseAzureOpenAIModel, + AzureOpenAiParameters, + KnowledgeSource, + KnowledgeSourceUnion, + KnownKnowledgeSourceKind, + KnowledgeSourceKind, + SearchIndexKnowledgeSource, + SearchIndexKnowledgeSourceParameters, + AzureBlobKnowledgeSource, + AzureBlobKnowledgeSourceParameters, + IndexingSchedule, + CreatedResources, + KnownBlobIndexerDataToExtract, + BlobIndexerDataToExtract, + KnownBlobIndexerImageAction, + BlobIndexerImageAction, + KnownBlobIndexerParsingMode, + BlobIndexerParsingMode, + KnownMarkdownHeaderDepth, + MarkdownHeaderDepth, + KnownMarkdownParsingSubmode, + MarkdownParsingSubmode, + KnownBlobIndexerPDFTextRotationAlgorithm, + BlobIndexerPDFTextRotationAlgorithm, + SearchServiceStatistics, + ServiceCounters, + ResourceCounter, + ServiceLimits, + IndexStatisticsSummary, + SearchIndexerDataSourceConnection, + KnownSearchIndexerDataSourceType, + SearchIndexerDataSourceType, + DataSourceCredentials, + SearchIndexerDataContainer, + KnownIndexerPermissionOption, + IndexerPermissionOption, + DataChangeDetectionPolicy, + DataChangeDetectionPolicyUnion, + HighWaterMarkChangeDetectionPolicy, + SqlIntegratedChangeTrackingPolicy, + DataDeletionDetectionPolicy, + DataDeletionDetectionPolicyUnion, + SoftDeleteColumnDeletionDetectionPolicy, + NativeBlobSoftDeleteDeletionDetectionPolicy, + ListDataSourcesResult, + DocumentKeysOrIds, + SearchIndexer, + IndexingParameters, + IndexingParametersConfiguration, + KnownIndexerExecutionEnvironment, + IndexerExecutionEnvironment, + FieldMapping, + FieldMappingFunction, + SearchIndexerCache, + ListIndexersResult, + SearchIndexerStatus, + KnownIndexerStatus, + IndexerStatus, + IndexerExecutionResult, + KnownIndexerExecutionStatus, + IndexerExecutionStatus, + KnownIndexerExecutionStatusDetail, + IndexerExecutionStatusDetail, + KnownIndexingMode, + IndexingMode, + IndexerCurrentState, + SearchIndexerError, + SearchIndexerWarning, + SearchIndexerLimits, + SearchIndexerSkillset, + SearchIndexerSkill, + SearchIndexerSkillUnion, + InputFieldMappingEntry, + 
OutputFieldMappingEntry, + ConditionalSkill, + KeyPhraseExtractionSkill, + KnownKeyPhraseExtractionSkillLanguage, + KeyPhraseExtractionSkillLanguage, + OcrSkill, + KnownOcrSkillLanguage, + OcrSkillLanguage, + KnownOcrLineEnding, + OcrLineEnding, + ImageAnalysisSkill, + KnownImageAnalysisSkillLanguage, + ImageAnalysisSkillLanguage, + KnownVisualFeature, + VisualFeature, + KnownImageDetail, + ImageDetail, + LanguageDetectionSkill, + ShaperSkill, + MergeSkill, + EntityRecognitionSkill, + KnownEntityCategory, + EntityCategory, + KnownEntityRecognitionSkillLanguage, + EntityRecognitionSkillLanguage, + SentimentSkill, + KnownSentimentSkillLanguage, + SentimentSkillLanguage, + SentimentSkillV3, + EntityLinkingSkill, + EntityRecognitionSkillV3, + PIIDetectionSkill, + KnownPIIDetectionSkillMaskingMode, + PIIDetectionSkillMaskingMode, + SplitSkill, + KnownSplitSkillLanguage, + SplitSkillLanguage, + KnownTextSplitMode, + TextSplitMode, + KnownSplitSkillUnit, + SplitSkillUnit, + AzureOpenAITokenizerParameters, + KnownSplitSkillEncoderModelName, + SplitSkillEncoderModelName, + CustomEntityLookupSkill, + KnownCustomEntityLookupSkillLanguage, + CustomEntityLookupSkillLanguage, + CustomEntity, + CustomEntityAlias, + TextTranslationSkill, + KnownTextTranslationSkillLanguage, + TextTranslationSkillLanguage, + DocumentExtractionSkill, + DocumentIntelligenceLayoutSkill, + KnownDocumentIntelligenceLayoutSkillOutputFormat, + DocumentIntelligenceLayoutSkillOutputFormat, + KnownDocumentIntelligenceLayoutSkillOutputMode, + DocumentIntelligenceLayoutSkillOutputMode, + KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + KnownDocumentIntelligenceLayoutSkillExtractionOptions, + DocumentIntelligenceLayoutSkillExtractionOptions, + DocumentIntelligenceLayoutSkillChunkingProperties, + KnownDocumentIntelligenceLayoutSkillChunkingUnit, + DocumentIntelligenceLayoutSkillChunkingUnit, + WebApiSkill, + AzureMachineLearningSkill, + AzureOpenAIEmbeddingSkill, + VisionVectorizeSkill, + ContentUnderstandingSkill, + KnownContentUnderstandingSkillExtractionOptions, + ContentUnderstandingSkillExtractionOptions, + ContentUnderstandingSkillChunkingProperties, + KnownContentUnderstandingSkillChunkingUnit, + ContentUnderstandingSkillChunkingUnit, + ChatCompletionSkill, + WebApiHttpHeaders, + CommonModelParameters, + KnownChatCompletionExtraParametersBehavior, + ChatCompletionExtraParametersBehavior, + ChatCompletionResponseFormat, + KnownChatCompletionResponseFormatType, + ChatCompletionResponseFormatType, + ChatCompletionSchemaProperties, + ChatCompletionSchema, + CognitiveServicesAccount, + CognitiveServicesAccountUnion, + DefaultCognitiveServicesAccount, + CognitiveServicesAccountKey, + AIServicesAccountKey, + AIServicesAccountIdentity, + SearchIndexerKnowledgeStore, + SearchIndexerKnowledgeStoreProjection, + SearchIndexerKnowledgeStoreTableProjectionSelector, + SearchIndexerKnowledgeStoreObjectProjectionSelector, + SearchIndexerKnowledgeStoreFileProjectionSelector, + SearchIndexerKnowledgeStoreParameters, + SearchIndexerIndexProjection, + SearchIndexerIndexProjectionSelector, + SearchIndexerIndexProjectionsParameters, + KnownIndexProjectionMode, + IndexProjectionMode, + SearchIndexerKnowledgeStoreProjectionSelector, + SearchIndexerKnowledgeStoreBlobProjectionSelector, + ListSkillsetsResult, + SkillNames, + IndexerResyncBody, + KnownIndexerResyncOption, + IndexerResyncOption, +} from "./models/azure/search/documents/indexes/index.js"; +export { + 
KnowledgeRetrievalReasoningEffort, + KnowledgeRetrievalReasoningEffortUnion, + KnownKnowledgeRetrievalReasoningEffortKind, + KnowledgeRetrievalReasoningEffortKind, + KnowledgeRetrievalMinimalReasoningEffort, + KnowledgeRetrievalLowReasoningEffort, + KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalHighReasoningEffort, + KnownKnowledgeRetrievalOutputMode, + KnowledgeRetrievalOutputMode, + KnowledgeBaseRetrievalRequest, + KnowledgeBaseMessage, + KnowledgeBaseMessageContent, + KnowledgeBaseMessageContentUnion, + KnownKnowledgeBaseMessageContentType, + KnowledgeBaseMessageContentType, + KnowledgeBaseMessageTextContent, + KnowledgeBaseMessageImageContent, + KnowledgeBaseImageContent, + KnowledgeRetrievalIntent, + KnowledgeRetrievalIntentUnion, + KnownKnowledgeRetrievalIntentType, + KnowledgeRetrievalIntentType, + KnowledgeRetrievalSemanticIntent, + KnowledgeSourceParams, + KnowledgeSourceParamsUnion, + SearchIndexKnowledgeSourceParams, + AzureBlobKnowledgeSourceParams, + IndexedSharePointKnowledgeSourceParams, + IndexedOneLakeKnowledgeSourceParams, + WebKnowledgeSourceParams, + RemoteSharePointKnowledgeSourceParams, + KnowledgeBaseRetrievalResponse, + KnowledgeBaseActivityRecord, + KnowledgeBaseActivityRecordUnion, + KnowledgeBaseErrorDetail, + KnowledgeBaseErrorAdditionalInfo, + KnowledgeBaseModelQueryPlanningActivityRecord, + KnowledgeBaseModelAnswerSynthesisActivityRecord, + KnowledgeBaseAgenticReasoningActivityRecord, + KnowledgeBaseReference, + KnowledgeBaseReferenceUnion, + KnowledgeBaseSearchIndexReference, + KnowledgeBaseAzureBlobReference, + KnowledgeBaseIndexedSharePointReference, + KnowledgeBaseIndexedOneLakeReference, + KnowledgeBaseWebReference, + KnowledgeBaseRemoteSharePointReference, + SharePointSensitivityLabelInfo, +} from "./models/azure/search/documents/knowledgeBase/index.js"; +export { + AutocompletePostOptionalParams, + AutocompleteGetOptionalParams, + IndexOptionalParams, + SuggestPostOptionalParams, + SuggestGetOptionalParams, + GetDocumentOptionalParams, + SearchPostOptionalParams, + SearchGetOptionalParams, + GetDocumentCountOptionalParams, + SearchClientOptionalParams, +} from "./search/api/index.js"; +export { PageSettings, ContinuablePage, PagedAsyncIterableIterator }; +export { SearchIndexClient } from "./searchIndex/searchIndexClient.js"; +export { + ListIndexStatsSummaryOptionalParams, + GetServiceStatisticsOptionalParams, + CreateKnowledgeSourceOptionalParams, + ListKnowledgeSourcesOptionalParams, + GetKnowledgeSourceOptionalParams, + DeleteKnowledgeSourceOptionalParams, + CreateOrUpdateKnowledgeSourceOptionalParams, + CreateKnowledgeBaseOptionalParams, + ListKnowledgeBasesOptionalParams, + GetKnowledgeBaseOptionalParams, + DeleteKnowledgeBaseOptionalParams, + CreateOrUpdateKnowledgeBaseOptionalParams, + CreateAliasOptionalParams, + ListAliasesOptionalParams, + GetAliasOptionalParams, + DeleteAliasOptionalParams, + CreateOrUpdateAliasOptionalParams, + AnalyzeTextOptionalParams, + GetIndexStatisticsOptionalParams, + CreateIndexOptionalParams, + ListIndexesOptionalParams, + GetIndexOptionalParams, + DeleteIndexOptionalParams, + CreateOrUpdateIndexOptionalParams, + CreateSynonymMapOptionalParams, + GetSynonymMapsOptionalParams, + GetSynonymMapOptionalParams, + DeleteSynonymMapOptionalParams, + CreateOrUpdateSynonymMapOptionalParams, + SearchIndexClientOptionalParams, +} from "./searchIndex/api/index.js"; +export { SearchIndexerClient } from "./searchIndexer/searchIndexerClient.js"; +export { + ResetSkillsOptionalParams, + CreateSkillsetOptionalParams, 
+ GetSkillsetsOptionalParams, + GetSkillsetOptionalParams, + DeleteSkillsetOptionalParams, + CreateOrUpdateSkillsetOptionalParams, + GetIndexerStatusOptionalParams, + CreateIndexerOptionalParams, + GetIndexersOptionalParams, + GetIndexerOptionalParams, + DeleteIndexerOptionalParams, + CreateOrUpdateIndexerOptionalParams, + RunIndexerOptionalParams, + ResetDocumentsOptionalParams, + ResyncOptionalParams, + ResetIndexerOptionalParams, + CreateDataSourceConnectionOptionalParams, + GetDataSourceConnectionsOptionalParams, + GetDataSourceConnectionOptionalParams, + DeleteDataSourceConnectionOptionalParams, + CreateOrUpdateDataSourceConnectionOptionalParams, + SearchIndexerClientOptionalParams, +} from "./searchIndexer/api/index.js"; +export { KnowledgeBaseRetrievalClient } from "./knowledgeBaseRetrieval/knowledgeBaseRetrievalClient.js"; +export { + KnowledgeBaseRetrievalClientOptionalParams, + RetrieveOptionalParams, +} from "./knowledgeBaseRetrieval/api/index.js"; diff --git a/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/index.ts b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/index.ts new file mode 100644 index 000000000000..f19d348533fb --- /dev/null +++ b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/index.ts @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { + createKnowledgeBaseRetrieval, + KnowledgeBaseRetrievalContext, + KnowledgeBaseRetrievalClientOptionalParams, +} from "./knowledgeBaseRetrievalContext.js"; +export { retrieve } from "./operations.js"; +export { RetrieveOptionalParams } from "./options.js"; diff --git a/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/knowledgeBaseRetrievalContext.ts b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/knowledgeBaseRetrievalContext.ts new file mode 100644 index 000000000000..d173341d7f45 --- /dev/null +++ b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/knowledgeBaseRetrievalContext.ts @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { logger } from "../../logger.js"; +import { KnownVersions } from "../../models/models.js"; +import { Client, ClientOptions, getClient } from "@azure-rest/core-client"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; + +export interface KnowledgeBaseRetrievalContext extends Client { + /** The API version to use for this operation. */ + /** Known values of {@link KnownVersions} that the service accepts. */ + apiVersion: string; +} + +/** Optional parameters for the client. */ +export interface KnowledgeBaseRetrievalClientOptionalParams extends ClientOptions { + /** The API version to use for this operation. */ + /** Known values of {@link KnownVersions} that the service accepts. */ + apiVersion?: string; +} + +export function createKnowledgeBaseRetrieval( + endpointParam: string, + credential: KeyCredential | TokenCredential, + options: KnowledgeBaseRetrievalClientOptionalParams = {}, +): KnowledgeBaseRetrievalContext { + const endpointUrl = options.endpoint ?? String(endpointParam); + const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix; + const userAgentInfo = `azsdk-js-search-documents/12.3.0-beta.1`; + const userAgentPrefix = prefixFromOptions + ? 
`${prefixFromOptions} azsdk-js-api ${userAgentInfo}` + : `azsdk-js-api ${userAgentInfo}`; + const { apiVersion: _, ...updatedOptions } = { + ...options, + userAgentOptions: { userAgentPrefix }, + loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info }, + credentials: { + scopes: options.credentials?.scopes ?? ["https://search.azure.com/.default"], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", + }, + }; + const clientContext = getClient(endpointUrl, credential, updatedOptions); + clientContext.pipeline.removePolicy({ name: "ApiVersionPolicy" }); + const apiVersion = options.apiVersion ?? "2025-11-01-preview"; + clientContext.pipeline.addPolicy({ + name: "ClientApiVersionPolicy", + sendRequest: (req, next) => { + // Use the apiVersion defined in request url directly + // Append one if there is no apiVersion and we have one at client options + const url = new URL(req.url); + if (!url.searchParams.get("api-version")) { + req.url = `${req.url}${ + Array.from(url.searchParams.keys()).length > 0 ? "&" : "?" + }api-version=${apiVersion}`; + } + + return next(req); + }, + }); + return { ...clientContext, apiVersion } as KnowledgeBaseRetrievalContext; +} diff --git a/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/operations.ts b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/operations.ts new file mode 100644 index 000000000000..15f6dc96ae27 --- /dev/null +++ b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/operations.ts @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { KnowledgeBaseRetrievalContext as Client } from "./index.js"; +import { + KnowledgeBaseRetrievalRequest, + knowledgeBaseRetrievalRequestSerializer, + KnowledgeBaseRetrievalResponse, + knowledgeBaseRetrievalResponseDeserializer, +} from "../../models/azure/search/documents/knowledgeBase/models.js"; +import { expandUrlTemplate } from "../../static-helpers/urlTemplate.js"; +import { RetrieveOptionalParams } from "./options.js"; +import { + StreamableMethod, + PathUncheckedResponse, + createRestError, + operationOptionsToRequestParameters, +} from "@azure-rest/core-client"; + +export function _retrieveSend( + context: Client, + knowledgeBaseName: string, + retrievalRequest: KnowledgeBaseRetrievalRequest, + options: RetrieveOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/retrieve/{knowledgeBaseName}{?api%2Dversion}", + { + knowledgeBaseName: knowledgeBaseName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeBaseRetrievalRequestSerializer(retrievalRequest), + }); +} + +export async function _retrieveDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "206"]; + if (!expectedStatuses.includes(result.status)) { + throw createRestError(result); + } + + return knowledgeBaseRetrievalResponseDeserializer(result.body); +} + +/** KnowledgeBase retrieves relevant data from backing stores. */ +export async function retrieve( + context: Client, + knowledgeBaseName: string, + retrievalRequest: KnowledgeBaseRetrievalRequest, + options: RetrieveOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _retrieveSend(context, knowledgeBaseName, retrievalRequest, options); + return _retrieveDeserialize(result); +} diff --git a/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/options.ts b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/options.ts new file mode 100644 index 000000000000..ae04f201e1fd --- /dev/null +++ b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/options.ts @@ -0,0 +1,12 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { OperationOptions } from "@azure-rest/core-client"; + +/** Optional parameters. */ +export interface RetrieveOptionalParams extends OperationOptions { + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} diff --git a/sdk/search/search-documents/generated/knowledgeBaseRetrieval/index.ts b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/index.ts new file mode 100644 index 000000000000..53eccc42f981 --- /dev/null +++ b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/index.ts @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { KnowledgeBaseRetrievalClient } from "./knowledgeBaseRetrievalClient.js"; +export { + KnowledgeBaseRetrievalContext, + KnowledgeBaseRetrievalClientOptionalParams, + RetrieveOptionalParams, +} from "./api/index.js"; diff --git a/sdk/search/search-documents/generated/knowledgeBaseRetrieval/knowledgeBaseRetrievalClient.ts b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/knowledgeBaseRetrievalClient.ts new file mode 100644 index 000000000000..e3be22b4ebd8 --- /dev/null +++ b/sdk/search/search-documents/generated/knowledgeBaseRetrieval/knowledgeBaseRetrievalClient.ts @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+
+import {
+  createKnowledgeBaseRetrieval,
+  KnowledgeBaseRetrievalContext,
+  KnowledgeBaseRetrievalClientOptionalParams,
+} from "./api/index.js";
+import {
+  KnowledgeBaseRetrievalRequest,
+  KnowledgeBaseRetrievalResponse,
+} from "../models/azure/search/documents/knowledgeBase/models.js";
+import { retrieve } from "./api/operations.js";
+import { RetrieveOptionalParams } from "./api/options.js";
+import { KeyCredential, TokenCredential } from "@azure/core-auth";
+import { Pipeline } from "@azure/core-rest-pipeline";
+
+export { KnowledgeBaseRetrievalClientOptionalParams } from "./api/knowledgeBaseRetrievalContext.js";
+
+export class KnowledgeBaseRetrievalClient {
+  private _client: KnowledgeBaseRetrievalContext;
+  /** The pipeline used by this client to make requests */
+  public readonly pipeline: Pipeline;
+
+  constructor(
+    endpointParam: string,
+    credential: KeyCredential | TokenCredential,
+    options: KnowledgeBaseRetrievalClientOptionalParams = {},
+  ) {
+    const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix;
+    const userAgentPrefix = prefixFromOptions
+      ? `${prefixFromOptions} azsdk-js-client`
+      : `azsdk-js-client`;
+    this._client = createKnowledgeBaseRetrieval(endpointParam, credential, {
+      ...options,
+      userAgentOptions: { userAgentPrefix },
+    });
+    this.pipeline = this._client.pipeline;
+  }
+
+  /** KnowledgeBase retrieves relevant data from backing stores. */
+  retrieve(
+    knowledgeBaseName: string,
+    retrievalRequest: KnowledgeBaseRetrievalRequest,
+    options: RetrieveOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeBaseRetrievalResponse> {
+    return retrieve(this._client, knowledgeBaseName, retrievalRequest, options);
+  }
+}
diff --git a/sdk/search/search-documents/generated/logger.ts b/sdk/search/search-documents/generated/logger.ts
new file mode 100644
index 000000000000..d8dc5f9c8c13
--- /dev/null
+++ b/sdk/search/search-documents/generated/logger.ts
@@ -0,0 +1,5 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+import { createClientLogger } from "@azure/logger";
+export const logger = createClientLogger("search-documents");
diff --git a/sdk/search/search-documents/generated/models/azure/search/documents/index.ts b/sdk/search/search-documents/generated/models/azure/search/documents/index.ts
new file mode 100644
index 000000000000..7efdbd9aacf6
--- /dev/null
+++ b/sdk/search/search-documents/generated/models/azure/search/documents/index.ts
@@ -0,0 +1,86 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
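+
+// Barrel file: re-exports the document-operation model types from "./models.js".
+// Illustrative consumption (the import path is a hypothetical consumer's view of
+// the build output, not something this hunk establishes):
+//
+//   import { SearchRequest, QueryType } from "./generated/models/azure/search/documents/index.js";
+//   const request: SearchRequest = { /* fields elided; defined in models.js */ };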
+ +export { + ErrorResponse, + ErrorDetail, + ErrorAdditionalInfo, + SearchDocumentsResult, + FacetResult, + QueryAnswerResult, + DebugInfo, + QueryRewritesDebugInfo, + QueryRewritesValuesDebugInfo, + SearchRequest, + KnownQueryType, + QueryType, + KnownScoringStatistics, + ScoringStatistics, + KnownQueryDebugMode, + QueryDebugMode, + KnownSearchMode, + SearchMode, + KnownQueryLanguage, + QueryLanguage, + KnownQuerySpellerType, + QuerySpellerType, + KnownSemanticErrorMode, + SemanticErrorMode, + KnownQueryAnswerType, + QueryAnswerType, + KnownQueryCaptionType, + QueryCaptionType, + KnownQueryRewritesType, + QueryRewritesType, + VectorQuery, + VectorQueryUnion, + VectorThreshold, + VectorThresholdUnion, + KnownVectorThresholdKind, + VectorThresholdKind, + VectorSimilarityThreshold, + SearchScoreThreshold, + KnownVectorQueryKind, + VectorQueryKind, + VectorizedQuery, + VectorizableTextQuery, + VectorizableImageUrlQuery, + VectorizableImageBinaryQuery, + KnownVectorFilterMode, + VectorFilterMode, + HybridSearch, + KnownHybridCountAndFacetMode, + HybridCountAndFacetMode, + SearchResult, + QueryCaptionResult, + DocumentDebugInfo, + SemanticDebugInfo, + QueryResultDocumentSemanticField, + KnownSemanticFieldState, + SemanticFieldState, + QueryResultDocumentRerankerInput, + VectorsDebugInfo, + QueryResultDocumentSubscores, + TextResult, + SingleVectorFieldResult, + QueryResultDocumentInnerHit, + KnownSemanticErrorReason, + SemanticErrorReason, + KnownSemanticSearchResultsType, + SemanticSearchResultsType, + KnownSemanticQueryRewritesResultType, + SemanticQueryRewritesResultType, + LookupDocument, + SuggestDocumentsResult, + SuggestResult, + IndexDocumentsBatch, + IndexAction, + KnownIndexActionType, + IndexActionType, + IndexDocumentsResult, + IndexingResult, + AutocompleteResult, + AutocompleteItem, + KnownAutocompleteMode, + AutocompleteMode, +} from "./models.js"; diff --git a/sdk/search/search-documents/generated/models/azure/search/documents/indexes/index.ts b/sdk/search/search-documents/generated/models/azure/search/documents/indexes/index.ts new file mode 100644 index 000000000000..ce12b9ceeb72 --- /dev/null +++ b/sdk/search/search-documents/generated/models/azure/search/documents/indexes/index.ts @@ -0,0 +1,364 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
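+
+// Barrel file for the index-management models. The `KnownXxx` enum / `Xxx` string
+// alias pairs exported below are extensible enums: the enum lists values the
+// service is known to support, while the string alias also admits values the
+// service may add later. Illustrative sketch:
+//
+//   const keyType: SearchFieldDataType = KnownSearchFieldDataType.String; // "Edm.String"
+//   const future: SearchFieldDataType = "Edm.SomeFutureType";             // also allowed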
+ +export { + SynonymMap, + SearchResourceEncryptionKey, + AzureActiveDirectoryApplicationCredentials, + SearchIndexerDataIdentity, + SearchIndexerDataIdentityUnion, + SearchIndexerDataNoneIdentity, + SearchIndexerDataUserAssignedIdentity, + ListSynonymMapsResult, + SearchIndex, + SearchField, + KnownSearchFieldDataType, + SearchFieldDataType, + KnownPermissionFilter, + PermissionFilter, + KnownLexicalAnalyzerName, + LexicalAnalyzerName, + KnownLexicalNormalizerName, + LexicalNormalizerName, + KnownVectorEncodingFormat, + VectorEncodingFormat, + ScoringProfile, + TextWeights, + ScoringFunction, + ScoringFunctionUnion, + KnownScoringFunctionInterpolation, + ScoringFunctionInterpolation, + DistanceScoringFunction, + DistanceScoringParameters, + FreshnessScoringFunction, + FreshnessScoringParameters, + MagnitudeScoringFunction, + MagnitudeScoringParameters, + TagScoringFunction, + TagScoringParameters, + KnownScoringFunctionAggregation, + ScoringFunctionAggregation, + CorsOptions, + SearchSuggester, + LexicalAnalyzer, + LexicalAnalyzerUnion, + CustomAnalyzer, + KnownLexicalTokenizerName, + LexicalTokenizerName, + KnownTokenFilterName, + TokenFilterName, + KnownCharFilterName, + CharFilterName, + PatternAnalyzer, + KnownRegexFlags, + RegexFlags, + LuceneStandardAnalyzer, + StopAnalyzer, + LexicalTokenizer, + LexicalTokenizerUnion, + ClassicTokenizer, + EdgeNGramTokenizer, + KnownTokenCharacterKind, + TokenCharacterKind, + KeywordTokenizer, + MicrosoftLanguageTokenizer, + KnownMicrosoftTokenizerLanguage, + MicrosoftTokenizerLanguage, + MicrosoftLanguageStemmingTokenizer, + KnownMicrosoftStemmingTokenizerLanguage, + MicrosoftStemmingTokenizerLanguage, + NGramTokenizer, + PathHierarchyTokenizer, + PatternTokenizer, + LuceneStandardTokenizer, + UaxUrlEmailTokenizer, + TokenFilter, + TokenFilterUnion, + AsciiFoldingTokenFilter, + CjkBigramTokenFilter, + KnownCjkBigramTokenFilterScripts, + CjkBigramTokenFilterScripts, + CommonGramTokenFilter, + DictionaryDecompounderTokenFilter, + EdgeNGramTokenFilter, + KnownEdgeNGramTokenFilterSide, + EdgeNGramTokenFilterSide, + ElisionTokenFilter, + KeepTokenFilter, + KeywordMarkerTokenFilter, + LengthTokenFilter, + LimitTokenFilter, + NGramTokenFilter, + PatternCaptureTokenFilter, + PatternReplaceTokenFilter, + PhoneticTokenFilter, + KnownPhoneticEncoder, + PhoneticEncoder, + ShingleTokenFilter, + SnowballTokenFilter, + KnownSnowballTokenFilterLanguage, + SnowballTokenFilterLanguage, + StemmerTokenFilter, + KnownStemmerTokenFilterLanguage, + StemmerTokenFilterLanguage, + StemmerOverrideTokenFilter, + StopwordsTokenFilter, + KnownStopwordsList, + StopwordsList, + SynonymTokenFilter, + TruncateTokenFilter, + UniqueTokenFilter, + WordDelimiterTokenFilter, + CharFilter, + CharFilterUnion, + MappingCharFilter, + PatternReplaceCharFilter, + LexicalNormalizer, + LexicalNormalizerUnion, + CustomNormalizer, + SimilarityAlgorithm, + SimilarityAlgorithmUnion, + ClassicSimilarity, + BM25Similarity, + SemanticSearch, + SemanticConfiguration, + SemanticPrioritizedFields, + SemanticField, + KnownRankingOrder, + RankingOrder, + VectorSearch, + VectorSearchProfile, + VectorSearchAlgorithmConfiguration, + VectorSearchAlgorithmConfigurationUnion, + KnownVectorSearchAlgorithmKind, + VectorSearchAlgorithmKind, + HnswAlgorithmConfiguration, + HnswParameters, + KnownVectorSearchAlgorithmMetric, + VectorSearchAlgorithmMetric, + ExhaustiveKnnAlgorithmConfiguration, + ExhaustiveKnnParameters, + VectorSearchVectorizer, + VectorSearchVectorizerUnion, + KnownVectorSearchVectorizerKind, + 
VectorSearchVectorizerKind, + AzureOpenAIVectorizer, + AzureOpenAIVectorizerParameters, + KnownAzureOpenAIModelName, + AzureOpenAIModelName, + WebApiVectorizer, + WebApiVectorizerParameters, + AIServicesVisionVectorizer, + AIServicesVisionParameters, + AzureMachineLearningVectorizer, + AzureMachineLearningParameters, + KnownAIFoundryModelCatalogName, + AIFoundryModelCatalogName, + VectorSearchCompression, + VectorSearchCompressionUnion, + RescoringOptions, + KnownVectorSearchCompressionRescoreStorageMethod, + VectorSearchCompressionRescoreStorageMethod, + KnownVectorSearchCompressionKind, + VectorSearchCompressionKind, + ScalarQuantizationCompression, + ScalarQuantizationParameters, + KnownVectorSearchCompressionTarget, + VectorSearchCompressionTarget, + BinaryQuantizationCompression, + KnownSearchIndexPermissionFilterOption, + SearchIndexPermissionFilterOption, + GetIndexStatisticsResult, + AnalyzeTextOptions, + AnalyzeResult, + AnalyzedTokenInfo, + SearchAlias, + KnowledgeBase, + KnowledgeSourceReference, + KnowledgeBaseModel, + KnowledgeBaseModelUnion, + KnownKnowledgeBaseModelKind, + KnowledgeBaseModelKind, + KnowledgeBaseAzureOpenAIModel, + AzureOpenAiParameters, + KnowledgeSource, + KnowledgeSourceUnion, + KnownKnowledgeSourceKind, + KnowledgeSourceKind, + SearchIndexKnowledgeSource, + SearchIndexKnowledgeSourceParameters, + AzureBlobKnowledgeSource, + AzureBlobKnowledgeSourceParameters, + IndexingSchedule, + CreatedResources, + KnownBlobIndexerDataToExtract, + BlobIndexerDataToExtract, + KnownBlobIndexerImageAction, + BlobIndexerImageAction, + KnownBlobIndexerParsingMode, + BlobIndexerParsingMode, + KnownMarkdownHeaderDepth, + MarkdownHeaderDepth, + KnownMarkdownParsingSubmode, + MarkdownParsingSubmode, + KnownBlobIndexerPDFTextRotationAlgorithm, + BlobIndexerPDFTextRotationAlgorithm, + SearchServiceStatistics, + ServiceCounters, + ResourceCounter, + ServiceLimits, + IndexStatisticsSummary, + SearchIndexerDataSourceConnection, + KnownSearchIndexerDataSourceType, + SearchIndexerDataSourceType, + DataSourceCredentials, + SearchIndexerDataContainer, + KnownIndexerPermissionOption, + IndexerPermissionOption, + DataChangeDetectionPolicy, + DataChangeDetectionPolicyUnion, + HighWaterMarkChangeDetectionPolicy, + SqlIntegratedChangeTrackingPolicy, + DataDeletionDetectionPolicy, + DataDeletionDetectionPolicyUnion, + SoftDeleteColumnDeletionDetectionPolicy, + NativeBlobSoftDeleteDeletionDetectionPolicy, + ListDataSourcesResult, + DocumentKeysOrIds, + SearchIndexer, + IndexingParameters, + IndexingParametersConfiguration, + KnownIndexerExecutionEnvironment, + IndexerExecutionEnvironment, + FieldMapping, + FieldMappingFunction, + SearchIndexerCache, + ListIndexersResult, + SearchIndexerStatus, + KnownIndexerStatus, + IndexerStatus, + IndexerExecutionResult, + KnownIndexerExecutionStatus, + IndexerExecutionStatus, + KnownIndexerExecutionStatusDetail, + IndexerExecutionStatusDetail, + KnownIndexingMode, + IndexingMode, + IndexerCurrentState, + SearchIndexerError, + SearchIndexerWarning, + SearchIndexerLimits, + SearchIndexerSkillset, + SearchIndexerSkill, + SearchIndexerSkillUnion, + InputFieldMappingEntry, + OutputFieldMappingEntry, + ConditionalSkill, + KeyPhraseExtractionSkill, + KnownKeyPhraseExtractionSkillLanguage, + KeyPhraseExtractionSkillLanguage, + OcrSkill, + KnownOcrSkillLanguage, + OcrSkillLanguage, + KnownOcrLineEnding, + OcrLineEnding, + ImageAnalysisSkill, + KnownImageAnalysisSkillLanguage, + ImageAnalysisSkillLanguage, + KnownVisualFeature, + VisualFeature, + KnownImageDetail, + 
ImageDetail, + LanguageDetectionSkill, + ShaperSkill, + MergeSkill, + EntityRecognitionSkill, + KnownEntityCategory, + EntityCategory, + KnownEntityRecognitionSkillLanguage, + EntityRecognitionSkillLanguage, + SentimentSkill, + KnownSentimentSkillLanguage, + SentimentSkillLanguage, + SentimentSkillV3, + EntityLinkingSkill, + EntityRecognitionSkillV3, + PIIDetectionSkill, + KnownPIIDetectionSkillMaskingMode, + PIIDetectionSkillMaskingMode, + SplitSkill, + KnownSplitSkillLanguage, + SplitSkillLanguage, + KnownTextSplitMode, + TextSplitMode, + KnownSplitSkillUnit, + SplitSkillUnit, + AzureOpenAITokenizerParameters, + KnownSplitSkillEncoderModelName, + SplitSkillEncoderModelName, + CustomEntityLookupSkill, + KnownCustomEntityLookupSkillLanguage, + CustomEntityLookupSkillLanguage, + CustomEntity, + CustomEntityAlias, + TextTranslationSkill, + KnownTextTranslationSkillLanguage, + TextTranslationSkillLanguage, + DocumentExtractionSkill, + DocumentIntelligenceLayoutSkill, + KnownDocumentIntelligenceLayoutSkillOutputFormat, + DocumentIntelligenceLayoutSkillOutputFormat, + KnownDocumentIntelligenceLayoutSkillOutputMode, + DocumentIntelligenceLayoutSkillOutputMode, + KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + KnownDocumentIntelligenceLayoutSkillExtractionOptions, + DocumentIntelligenceLayoutSkillExtractionOptions, + DocumentIntelligenceLayoutSkillChunkingProperties, + KnownDocumentIntelligenceLayoutSkillChunkingUnit, + DocumentIntelligenceLayoutSkillChunkingUnit, + WebApiSkill, + AzureMachineLearningSkill, + AzureOpenAIEmbeddingSkill, + VisionVectorizeSkill, + ContentUnderstandingSkill, + KnownContentUnderstandingSkillExtractionOptions, + ContentUnderstandingSkillExtractionOptions, + ContentUnderstandingSkillChunkingProperties, + KnownContentUnderstandingSkillChunkingUnit, + ContentUnderstandingSkillChunkingUnit, + ChatCompletionSkill, + WebApiHttpHeaders, + CommonModelParameters, + KnownChatCompletionExtraParametersBehavior, + ChatCompletionExtraParametersBehavior, + ChatCompletionResponseFormat, + KnownChatCompletionResponseFormatType, + ChatCompletionResponseFormatType, + ChatCompletionSchemaProperties, + ChatCompletionSchema, + CognitiveServicesAccount, + CognitiveServicesAccountUnion, + DefaultCognitiveServicesAccount, + CognitiveServicesAccountKey, + AIServicesAccountKey, + AIServicesAccountIdentity, + SearchIndexerKnowledgeStore, + SearchIndexerKnowledgeStoreProjection, + SearchIndexerKnowledgeStoreTableProjectionSelector, + SearchIndexerKnowledgeStoreObjectProjectionSelector, + SearchIndexerKnowledgeStoreFileProjectionSelector, + SearchIndexerKnowledgeStoreParameters, + SearchIndexerIndexProjection, + SearchIndexerIndexProjectionSelector, + SearchIndexerIndexProjectionsParameters, + KnownIndexProjectionMode, + IndexProjectionMode, + SearchIndexerKnowledgeStoreProjectionSelector, + SearchIndexerKnowledgeStoreBlobProjectionSelector, + ListSkillsetsResult, + SkillNames, + IndexerResyncBody, + KnownIndexerResyncOption, + IndexerResyncOption, +} from "./models.js"; diff --git a/sdk/search/search-documents/generated/models/azure/search/documents/indexes/models.ts b/sdk/search/search-documents/generated/models/azure/search/documents/indexes/models.ts new file mode 100644 index 000000000000..6a75b907caad --- /dev/null +++ b/sdk/search/search-documents/generated/models/azure/search/documents/indexes/models.ts @@ -0,0 +1,11949 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +import { serializeRecord } from "../../../../../static-helpers/serialization/serialize-record.js"; +import { + IndexedSharePointKnowledgeSource, + IndexedOneLakeKnowledgeSource, + WebKnowledgeSource, + RemoteSharePointKnowledgeSource, + ServiceIndexersRuntime, + serviceIndexersRuntimeDeserializer, + IndexerRuntime, + indexerRuntimeDeserializer, +} from "../../../../models.js"; +import { + knowledgeRetrievalReasoningEffortUnionSerializer, + knowledgeRetrievalReasoningEffortUnionDeserializer, + KnowledgeRetrievalReasoningEffortUnion, + KnowledgeRetrievalOutputMode, +} from "../knowledgeBase/models.js"; + +/** + * This file contains only generated model types and their (de)serializers. + * Disable the following rules for internal models with '_' prefix and deserializers which require 'any' for raw JSON input. + */ +/* eslint-disable @typescript-eslint/naming-convention */ +/* eslint-disable @typescript-eslint/explicit-module-boundary-types */ +/** Represents a synonym map definition. */ +export interface SynonymMap { + /** The name of the synonym map. */ + name: string; + /** The format of the synonym map. Only the 'solr' format is currently supported. */ + format: "solr"; + /** A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. */ + synonyms: string; + /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ + encryptionKey?: SearchResourceEncryptionKey; + /** The ETag of the synonym map. */ + eTag?: string; +} + +export function synonymMapSerializer(item: SynonymMap): any { + return { + name: item["name"], + format: item["format"], + synonyms: item["synonyms"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + "@odata.etag": item["eTag"], + }; +} + +export function synonymMapDeserializer(item: any): SynonymMap { + return { + name: item["name"], + format: item["format"], + synonyms: item["synonyms"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + eTag: item["@odata.etag"], + }; +} + +/** A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest, such as indexes and synonym maps. */ +export interface SearchResourceEncryptionKey { + /** The name of your Azure Key Vault key to be used to encrypt your data at rest. */ + keyName: string; + /** The version of your Azure Key Vault key to be used to encrypt your data at rest. */ + keyVersion?: string; + /** The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be `https://my-keyvault-name.vault.azure.net`. */ + vaultUri: string; + /** Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. 
*/ + accessCredentials?: AzureActiveDirectoryApplicationCredentials; + /** An explicit managed identity to use for this encryption key. If not specified and the access credentials property is null, the system-assigned managed identity is used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. If "none" is specified, the value of this property is cleared. */ + identity?: SearchIndexerDataIdentityUnion; +} + +export function searchResourceEncryptionKeySerializer(item: SearchResourceEncryptionKey): any { + return { + keyVaultKeyName: item["keyName"], + keyVaultKeyVersion: item["keyVersion"], + keyVaultUri: item["vaultUri"], + accessCredentials: !item["accessCredentials"] + ? item["accessCredentials"] + : azureActiveDirectoryApplicationCredentialsSerializer(item["accessCredentials"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + }; +} + +export function searchResourceEncryptionKeyDeserializer(item: any): SearchResourceEncryptionKey { + return { + keyName: item["keyVaultKeyName"], + keyVersion: item["keyVaultKeyVersion"], + vaultUri: item["keyVaultUri"], + accessCredentials: !item["accessCredentials"] + ? item["accessCredentials"] + : azureActiveDirectoryApplicationCredentialsDeserializer(item["accessCredentials"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + }; +} + +/** Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. */ +export interface AzureActiveDirectoryApplicationCredentials { + /** An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. */ + applicationId: string; + /** The authentication key of the specified AAD application. */ + applicationSecret?: string; +} + +export function azureActiveDirectoryApplicationCredentialsSerializer( + item: AzureActiveDirectoryApplicationCredentials, +): any { + return { + applicationId: item["applicationId"], + applicationSecret: item["applicationSecret"], + }; +} + +export function azureActiveDirectoryApplicationCredentialsDeserializer( + item: any, +): AzureActiveDirectoryApplicationCredentials { + return { + applicationId: item["applicationId"], + applicationSecret: item["applicationSecret"], + }; +} + +/** Abstract base type for data identities. */ +export interface SearchIndexerDataIdentity { + /** A URI fragment specifying the type of identity. 
*/ + /** The discriminator possible values: #Microsoft.Azure.Search.DataNoneIdentity, #Microsoft.Azure.Search.DataUserAssignedIdentity */ + odatatype: string; +} + +export function searchIndexerDataIdentitySerializer(item: SearchIndexerDataIdentity): any { + return { "@odata.type": item["odatatype"] }; +} + +export function searchIndexerDataIdentityDeserializer(item: any): SearchIndexerDataIdentity { + return { + odatatype: item["@odata.type"], + }; +} + +/** Alias for SearchIndexerDataIdentityUnion */ +export type SearchIndexerDataIdentityUnion = + | SearchIndexerDataNoneIdentity + | SearchIndexerDataUserAssignedIdentity + | SearchIndexerDataIdentity; + +export function searchIndexerDataIdentityUnionSerializer( + item: SearchIndexerDataIdentityUnion, +): any { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.DataNoneIdentity": + return searchIndexerDataNoneIdentitySerializer(item as SearchIndexerDataNoneIdentity); + + case "#Microsoft.Azure.Search.DataUserAssignedIdentity": + return searchIndexerDataUserAssignedIdentitySerializer( + item as SearchIndexerDataUserAssignedIdentity, + ); + + default: + return searchIndexerDataIdentitySerializer(item); + } +} + +export function searchIndexerDataIdentityUnionDeserializer( + item: any, +): SearchIndexerDataIdentityUnion { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.DataNoneIdentity": + return searchIndexerDataNoneIdentityDeserializer(item as SearchIndexerDataNoneIdentity); + + case "#Microsoft.Azure.Search.DataUserAssignedIdentity": + return searchIndexerDataUserAssignedIdentityDeserializer( + item as SearchIndexerDataUserAssignedIdentity, + ); + + default: + return searchIndexerDataIdentityDeserializer(item); + } +} + +/** Clears the identity property of a datasource. */ +export interface SearchIndexerDataNoneIdentity extends SearchIndexerDataIdentity { + /** The discriminator for derived types. */ + odatatype: "#Microsoft.Azure.Search.DataNoneIdentity"; +} + +export function searchIndexerDataNoneIdentitySerializer(item: SearchIndexerDataNoneIdentity): any { + return { "@odata.type": item["odatatype"] }; +} + +export function searchIndexerDataNoneIdentityDeserializer( + item: any, +): SearchIndexerDataNoneIdentity { + return { + odatatype: item["@odata.type"], + }; +} + +/** Specifies the identity for a datasource to use. */ +export interface SearchIndexerDataUserAssignedIdentity extends SearchIndexerDataIdentity { + /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. */ + resourceId: string; + /** A URI fragment specifying the type of identity. */ + odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity"; +} + +export function searchIndexerDataUserAssignedIdentitySerializer( + item: SearchIndexerDataUserAssignedIdentity, +): any { + return { + "@odata.type": item["odatatype"], + userAssignedIdentity: item["resourceId"], + }; +} + +export function searchIndexerDataUserAssignedIdentityDeserializer( + item: any, +): SearchIndexerDataUserAssignedIdentity { + return { + odatatype: item["@odata.type"], + resourceId: item["userAssignedIdentity"], + }; +} + +/** Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. */ +export interface ListSynonymMapsResult { + /** The synonym maps in the Search service. 
*/
+  synonymMaps: SynonymMap[];
+}
+
+export function listSynonymMapsResultDeserializer(item: any): ListSynonymMapsResult {
+  return {
+    synonymMaps: synonymMapArrayDeserializer(item["value"]),
+  };
+}
+
+export function synonymMapArraySerializer(result: Array<SynonymMap>): any[] {
+  return result.map((item) => {
+    return synonymMapSerializer(item);
+  });
+}
+
+export function synonymMapArrayDeserializer(result: Array<SynonymMap>): any[] {
+  return result.map((item) => {
+    return synonymMapDeserializer(item);
+  });
+}
+
+/** Represents a search index definition, which describes the fields and search behavior of an index. */
+export interface SearchIndex {
+  /** The name of the index. */
+  name: string;
+  /** The description of the index. */
+  description?: string;
+  /** The fields of the index. */
+  fields: SearchField[];
+  /** The scoring profiles for the index. */
+  scoringProfiles?: ScoringProfile[];
+  /** The name of the scoring profile to use if none is specified in the query. If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. */
+  defaultScoringProfile?: string;
+  /** Options to control Cross-Origin Resource Sharing (CORS) for the index. */
+  corsOptions?: CorsOptions;
+  /** The suggesters for the index. */
+  suggesters?: SearchSuggester[];
+  /** The analyzers for the index. */
+  analyzers?: LexicalAnalyzerUnion[];
+  /** The tokenizers for the index. */
+  tokenizers?: LexicalTokenizerUnion[];
+  /** The token filters for the index. */
+  tokenFilters?: TokenFilterUnion[];
+  /** The character filters for the index. */
+  charFilters?: CharFilterUnion[];
+  /** The normalizers for the index. */
+  normalizers?: LexicalNormalizerUnion[];
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+  /** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */
+  similarity?: SimilarityAlgorithmUnion;
+  /** Defines parameters for a search index that influence semantic capabilities. */
+  semanticSearch?: SemanticSearch;
+  /** Contains configuration options related to vector search. */
+  vectorSearch?: VectorSearch;
+  /** A value indicating whether permission filtering is enabled for the index. */
+  permissionFilterOption?: SearchIndexPermissionFilterOption;
+  /** A value indicating whether Purview is enabled for the index. */
+  purviewEnabled?: boolean;
+  /** The ETag of the index. */
+  eTag?: string;
+}
+
+export function searchIndexSerializer(item: SearchIndex): any {
+  return {
+    name: item["name"],
+    description: item["description"],
+    fields: searchFieldArraySerializer(item["fields"]),
+    scoringProfiles: !item["scoringProfiles"]
+      ?
item["scoringProfiles"] + : scoringProfileArraySerializer(item["scoringProfiles"]), + defaultScoringProfile: item["defaultScoringProfile"], + corsOptions: !item["corsOptions"] + ? item["corsOptions"] + : corsOptionsSerializer(item["corsOptions"]), + suggesters: !item["suggesters"] + ? item["suggesters"] + : searchSuggesterArraySerializer(item["suggesters"]), + analyzers: !item["analyzers"] + ? item["analyzers"] + : lexicalAnalyzerUnionArraySerializer(item["analyzers"]), + tokenizers: !item["tokenizers"] + ? item["tokenizers"] + : lexicalTokenizerUnionArraySerializer(item["tokenizers"]), + tokenFilters: !item["tokenFilters"] + ? item["tokenFilters"] + : tokenFilterUnionArraySerializer(item["tokenFilters"]), + charFilters: !item["charFilters"] + ? item["charFilters"] + : charFilterUnionArraySerializer(item["charFilters"]), + normalizers: !item["normalizers"] + ? item["normalizers"] + : lexicalNormalizerUnionArraySerializer(item["normalizers"]), + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + similarity: !item["similarity"] + ? item["similarity"] + : similarityAlgorithmUnionSerializer(item["similarity"]), + semantic: !item["semanticSearch"] + ? item["semanticSearch"] + : semanticSearchSerializer(item["semanticSearch"]), + vectorSearch: !item["vectorSearch"] + ? item["vectorSearch"] + : vectorSearchSerializer(item["vectorSearch"]), + permissionFilterOption: item["permissionFilterOption"], + purviewEnabled: item["purviewEnabled"], + "@odata.etag": item["eTag"], + }; +} + +export function searchIndexDeserializer(item: any): SearchIndex { + return { + name: item["name"], + description: item["description"], + fields: searchFieldArrayDeserializer(item["fields"]), + scoringProfiles: !item["scoringProfiles"] + ? item["scoringProfiles"] + : scoringProfileArrayDeserializer(item["scoringProfiles"]), + defaultScoringProfile: item["defaultScoringProfile"], + corsOptions: !item["corsOptions"] + ? item["corsOptions"] + : corsOptionsDeserializer(item["corsOptions"]), + suggesters: !item["suggesters"] + ? item["suggesters"] + : searchSuggesterArrayDeserializer(item["suggesters"]), + analyzers: !item["analyzers"] + ? item["analyzers"] + : lexicalAnalyzerUnionArrayDeserializer(item["analyzers"]), + tokenizers: !item["tokenizers"] + ? item["tokenizers"] + : lexicalTokenizerUnionArrayDeserializer(item["tokenizers"]), + tokenFilters: !item["tokenFilters"] + ? item["tokenFilters"] + : tokenFilterUnionArrayDeserializer(item["tokenFilters"]), + charFilters: !item["charFilters"] + ? item["charFilters"] + : charFilterUnionArrayDeserializer(item["charFilters"]), + normalizers: !item["normalizers"] + ? item["normalizers"] + : lexicalNormalizerUnionArrayDeserializer(item["normalizers"]), + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + similarity: !item["similarity"] + ? item["similarity"] + : similarityAlgorithmUnionDeserializer(item["similarity"]), + semanticSearch: !item["semantic"] + ? item["semantic"] + : semanticSearchDeserializer(item["semantic"]), + vectorSearch: !item["vectorSearch"] + ? 
item["vectorSearch"] + : vectorSearchDeserializer(item["vectorSearch"]), + permissionFilterOption: item["permissionFilterOption"], + purviewEnabled: item["purviewEnabled"], + eTag: item["@odata.etag"], + }; +} + +export function searchFieldArraySerializer(result: Array): any[] { + return result.map((item) => { + return searchFieldSerializer(item); + }); +} + +export function searchFieldArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return searchFieldDeserializer(item); + }); +} + +/** Represents a field in an index definition, which describes the name, data type, and search behavior of a field. */ +export interface SearchField { + /** The name of the field, which must be unique within the fields collection of the index or parent field. */ + name: string; + /** The data type of the field. */ + type: SearchFieldDataType; + /** A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields. */ + key?: boolean; + /** A value indicating whether the field can be returned in a search result. You can disable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible to the end user. This property must be true for key fields, and it must be null for complex fields. This property can be changed on existing fields. Enabling this property does not cause any increase in index storage requirements. Default is true for simple fields, false for vector fields, and null for complex fields. */ + retrievable?: boolean; + /** An immutable value indicating whether the field will be persisted separately on disk to be returned in a search result. You can disable this option if you don't plan to return the field contents in a search response to save on storage overhead. This can only be set during index creation and only for vector fields. This property cannot be changed for existing fields or set as false for new fields. If this property is set as false, the property 'retrievable' must also be set to false. This property must be true or unset for key fields, for new fields, and for non-vector fields, and it must be null for complex fields. Disabling this property will reduce index storage requirements. The default is true for vector fields. */ + stored?: boolean; + /** A value indicating whether the field is full-text searchable. This means it will undergo analysis such as word-breaking during indexing. If you set a searchable field to a value like "sunny day", internally it will be split into the individual tokens "sunny" and "day". This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) are searchable by default. This property must be false for simple fields of other non-string data types, and it must be null for complex fields. Note: searchable fields consume extra space in your index to accommodate additional tokenized versions of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false. */ + searchable?: boolean; + /** A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. 
Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields. */ + filterable?: boolean; + /** A value indicating whether to enable the field to be referenced in $orderby expressions. By default, the search engine sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field can be sortable only if it is single-valued (it has a single value in the scope of the parent document). Simple collection fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex collections are also multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent field, or an ancestor field, that's the complex collection. Complex fields cannot be sortable and the sortable property must be null for such fields. The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. */ + sortable?: boolean; + /** A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. */ + facetable?: boolean; + /** A value indicating whether the field should be used as a permission filter. */ + permissionFilter?: PermissionFilter; + /** A value indicating whether the field contains sensitivity label information. */ + sensitivityLabel?: boolean; + /** The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */ + analyzerName?: LexicalAnalyzerName; + /** The name of the analyzer used at search time for the field. This option can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. */ + searchAnalyzerName?: LexicalAnalyzerName; + /** The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */ + indexAnalyzerName?: LexicalAnalyzerName; + /** The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed for the field. Must be null for complex fields. 
*/ + normalizerName?: LexicalNormalizerName; + /** The dimensionality of the vector field. */ + vectorSearchDimensions?: number; + /** The name of the vector search profile that specifies the algorithm and vectorizer to use when searching the vector field. */ + vectorSearchProfileName?: string; + /** The encoding format to interpret the field contents. */ + vectorEncodingFormat?: VectorEncodingFormat; + /** A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. Must be null or an empty collection for complex fields. */ + synonymMapNames?: string[]; + /** A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. */ + fields?: SearchField[]; +} + +export function searchFieldSerializer(item: SearchField): any { + return { + name: item["name"], + type: item["type"], + key: item["key"], + retrievable: item["retrievable"], + stored: item["stored"], + searchable: item["searchable"], + filterable: item["filterable"], + sortable: item["sortable"], + facetable: item["facetable"], + permissionFilter: item["permissionFilter"], + sensitivityLabel: item["sensitivityLabel"], + analyzer: item["analyzerName"], + searchAnalyzer: item["searchAnalyzerName"], + indexAnalyzer: item["indexAnalyzerName"], + normalizer: item["normalizerName"], + dimensions: item["vectorSearchDimensions"], + vectorSearchProfile: item["vectorSearchProfileName"], + vectorEncoding: item["vectorEncodingFormat"], + synonymMaps: !item["synonymMapNames"] + ? item["synonymMapNames"] + : item["synonymMapNames"].map((p: any) => { + return p; + }), + fields: !item["fields"] ? item["fields"] : searchFieldArraySerializer(item["fields"]), + }; +} + +export function searchFieldDeserializer(item: any): SearchField { + return { + name: item["name"], + type: item["type"], + key: item["key"], + retrievable: item["retrievable"], + stored: item["stored"], + searchable: item["searchable"], + filterable: item["filterable"], + sortable: item["sortable"], + facetable: item["facetable"], + permissionFilter: item["permissionFilter"], + sensitivityLabel: item["sensitivityLabel"], + analyzerName: item["analyzer"], + searchAnalyzerName: item["searchAnalyzer"], + indexAnalyzerName: item["indexAnalyzer"], + normalizerName: item["normalizer"], + vectorSearchDimensions: item["dimensions"], + vectorSearchProfileName: item["vectorSearchProfile"], + vectorEncodingFormat: item["vectorEncoding"], + synonymMapNames: !item["synonymMaps"] + ? item["synonymMaps"] + : item["synonymMaps"].map((p: any) => { + return p; + }), + fields: !item["fields"] ? item["fields"] : searchFieldArrayDeserializer(item["fields"]), + }; +} + +/** Defines the data type of a field in a search index. */ +export enum KnownSearchFieldDataType { + /** Indicates that a field contains a string. */ + String = "Edm.String", + /** Indicates that a field contains a 32-bit signed integer. */ + Int32 = "Edm.Int32", + /** Indicates that a field contains a 64-bit signed integer. */ + Int64 = "Edm.Int64", + /** Indicates that a field contains an IEEE double-precision floating point number. */ + Double = "Edm.Double", + /** Indicates that a field contains a Boolean value (true or false). 
*/ + Boolean = "Edm.Boolean", + /** Indicates that a field contains a date/time value, including timezone information. */ + DateTimeOffset = "Edm.DateTimeOffset", + /** Indicates that a field contains a geo-location in terms of longitude and latitude. */ + GeographyPoint = "Edm.GeographyPoint", + /** Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. */ + Complex = "Edm.ComplexType", + /** Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). */ + Single = "Edm.Single", + /** Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). */ + Half = "Edm.Half", + /** Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). */ + Int16 = "Edm.Int16", + /** Indicates that a field contains a 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). */ + SByte = "Edm.SByte", + /** Indicates that a field contains a 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte). */ + Byte = "Edm.Byte", +} + +/** + * Defines the data type of a field in a search index. \ + * {@link KnownSearchFieldDataType} can be used interchangeably with SearchFieldDataType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **Edm.String**: Indicates that a field contains a string. \ + * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer. \ + * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer. \ + * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number. \ + * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false). \ + * **Edm.DateTimeOffset**: Indicates that a field contains a date\/time value, including timezone information. \ + * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and latitude. \ + * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. \ + * **Edm.Single**: Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). \ + * **Edm.Half**: Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). \ + * **Edm.Int16**: Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). \ + * **Edm.SByte**: Indicates that a field contains a 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). \ + * **Edm.Byte**: Indicates that a field contains a 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte). + */ +export type SearchFieldDataType = string; + +/** A value indicating whether the field should be used as a permission filter. */ +export enum KnownPermissionFilter { + /** Field represents user IDs that should be used to filter document access on queries. */ + UserIds = "userIds", + /** Field represents group IDs that should be used to filter document access on queries. */ + GroupIds = "groupIds", + /** Field represents an RBAC scope that should be used to filter document access on queries. 
*/ + RbacScope = "rbacScope", +} + +/** + * A value indicating whether the field should be used as a permission filter. \ + * {@link KnownPermissionFilter} can be used interchangeably with PermissionFilter, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **userIds**: Field represents user IDs that should be used to filter document access on queries. \ + * **groupIds**: Field represents group IDs that should be used to filter document access on queries. \ + * **rbacScope**: Field represents an RBAC scope that should be used to filter document access on queries. + */ +export type PermissionFilter = string; + +/** Defines the names of all text analyzers supported by the search engine. */ +export enum KnownLexicalAnalyzerName { + /** Microsoft analyzer for Arabic. */ + ArMicrosoft = "ar.microsoft", + /** Lucene analyzer for Arabic. */ + ArLucene = "ar.lucene", + /** Lucene analyzer for Armenian. */ + HyLucene = "hy.lucene", + /** Microsoft analyzer for Bangla. */ + BnMicrosoft = "bn.microsoft", + /** Lucene analyzer for Basque. */ + EuLucene = "eu.lucene", + /** Microsoft analyzer for Bulgarian. */ + BgMicrosoft = "bg.microsoft", + /** Lucene analyzer for Bulgarian. */ + BgLucene = "bg.lucene", + /** Microsoft analyzer for Catalan. */ + CaMicrosoft = "ca.microsoft", + /** Lucene analyzer for Catalan. */ + CaLucene = "ca.lucene", + /** Microsoft analyzer for Chinese (Simplified). */ + ZhHansMicrosoft = "zh-Hans.microsoft", + /** Lucene analyzer for Chinese (Simplified). */ + ZhHansLucene = "zh-Hans.lucene", + /** Microsoft analyzer for Chinese (Traditional). */ + ZhHantMicrosoft = "zh-Hant.microsoft", + /** Lucene analyzer for Chinese (Traditional). */ + ZhHantLucene = "zh-Hant.lucene", + /** Microsoft analyzer for Croatian. */ + HrMicrosoft = "hr.microsoft", + /** Microsoft analyzer for Czech. */ + CsMicrosoft = "cs.microsoft", + /** Lucene analyzer for Czech. */ + CsLucene = "cs.lucene", + /** Microsoft analyzer for Danish. */ + DaMicrosoft = "da.microsoft", + /** Lucene analyzer for Danish. */ + DaLucene = "da.lucene", + /** Microsoft analyzer for Dutch. */ + NlMicrosoft = "nl.microsoft", + /** Lucene analyzer for Dutch. */ + NlLucene = "nl.lucene", + /** Microsoft analyzer for English. */ + EnMicrosoft = "en.microsoft", + /** Lucene analyzer for English. */ + EnLucene = "en.lucene", + /** Microsoft analyzer for Estonian. */ + EtMicrosoft = "et.microsoft", + /** Microsoft analyzer for Finnish. */ + FiMicrosoft = "fi.microsoft", + /** Lucene analyzer for Finnish. */ + FiLucene = "fi.lucene", + /** Microsoft analyzer for French. */ + FrMicrosoft = "fr.microsoft", + /** Lucene analyzer for French. */ + FrLucene = "fr.lucene", + /** Lucene analyzer for Galician. */ + GlLucene = "gl.lucene", + /** Microsoft analyzer for German. */ + DeMicrosoft = "de.microsoft", + /** Lucene analyzer for German. */ + DeLucene = "de.lucene", + /** Microsoft analyzer for Greek. */ + ElMicrosoft = "el.microsoft", + /** Lucene analyzer for Greek. */ + ElLucene = "el.lucene", + /** Microsoft analyzer for Gujarati. */ + GuMicrosoft = "gu.microsoft", + /** Microsoft analyzer for Hebrew. */ + HeMicrosoft = "he.microsoft", + /** Microsoft analyzer for Hindi. */ + HiMicrosoft = "hi.microsoft", + /** Lucene analyzer for Hindi. */ + HiLucene = "hi.lucene", + /** Microsoft analyzer for Hungarian. */ + HuMicrosoft = "hu.microsoft", + /** Lucene analyzer for Hungarian. */ + HuLucene = "hu.lucene", + /** Microsoft analyzer for Icelandic. 
*/ + IsMicrosoft = "is.microsoft", + /** Microsoft analyzer for Indonesian (Bahasa). */ + IdMicrosoft = "id.microsoft", + /** Lucene analyzer for Indonesian. */ + IdLucene = "id.lucene", + /** Lucene analyzer for Irish. */ + GaLucene = "ga.lucene", + /** Microsoft analyzer for Italian. */ + ItMicrosoft = "it.microsoft", + /** Lucene analyzer for Italian. */ + ItLucene = "it.lucene", + /** Microsoft analyzer for Japanese. */ + JaMicrosoft = "ja.microsoft", + /** Lucene analyzer for Japanese. */ + JaLucene = "ja.lucene", + /** Microsoft analyzer for Kannada. */ + KnMicrosoft = "kn.microsoft", + /** Microsoft analyzer for Korean. */ + KoMicrosoft = "ko.microsoft", + /** Lucene analyzer for Korean. */ + KoLucene = "ko.lucene", + /** Microsoft analyzer for Latvian. */ + LvMicrosoft = "lv.microsoft", + /** Lucene analyzer for Latvian. */ + LvLucene = "lv.lucene", + /** Microsoft analyzer for Lithuanian. */ + LtMicrosoft = "lt.microsoft", + /** Microsoft analyzer for Malayalam. */ + MlMicrosoft = "ml.microsoft", + /** Microsoft analyzer for Malay (Latin). */ + MsMicrosoft = "ms.microsoft", + /** Microsoft analyzer for Marathi. */ + MrMicrosoft = "mr.microsoft", + /** Microsoft analyzer for Norwegian (Bokmål). */ + NbMicrosoft = "nb.microsoft", + /** Lucene analyzer for Norwegian. */ + NoLucene = "no.lucene", + /** Lucene analyzer for Persian. */ + FaLucene = "fa.lucene", + /** Microsoft analyzer for Polish. */ + PlMicrosoft = "pl.microsoft", + /** Lucene analyzer for Polish. */ + PlLucene = "pl.lucene", + /** Microsoft analyzer for Portuguese (Brazil). */ + PtBrMicrosoft = "pt-BR.microsoft", + /** Lucene analyzer for Portuguese (Brazil). */ + PtBrLucene = "pt-BR.lucene", + /** Microsoft analyzer for Portuguese (Portugal). */ + PtPtMicrosoft = "pt-PT.microsoft", + /** Lucene analyzer for Portuguese (Portugal). */ + PtPtLucene = "pt-PT.lucene", + /** Microsoft analyzer for Punjabi. */ + PaMicrosoft = "pa.microsoft", + /** Microsoft analyzer for Romanian. */ + RoMicrosoft = "ro.microsoft", + /** Lucene analyzer for Romanian. */ + RoLucene = "ro.lucene", + /** Microsoft analyzer for Russian. */ + RuMicrosoft = "ru.microsoft", + /** Lucene analyzer for Russian. */ + RuLucene = "ru.lucene", + /** Microsoft analyzer for Serbian (Cyrillic). */ + SrCyrillicMicrosoft = "sr-cyrillic.microsoft", + /** Microsoft analyzer for Serbian (Latin). */ + SrLatinMicrosoft = "sr-latin.microsoft", + /** Microsoft analyzer for Slovak. */ + SkMicrosoft = "sk.microsoft", + /** Microsoft analyzer for Slovenian. */ + SlMicrosoft = "sl.microsoft", + /** Microsoft analyzer for Spanish. */ + EsMicrosoft = "es.microsoft", + /** Lucene analyzer for Spanish. */ + EsLucene = "es.lucene", + /** Microsoft analyzer for Swedish. */ + SvMicrosoft = "sv.microsoft", + /** Lucene analyzer for Swedish. */ + SvLucene = "sv.lucene", + /** Microsoft analyzer for Tamil. */ + TaMicrosoft = "ta.microsoft", + /** Microsoft analyzer for Telugu. */ + TeMicrosoft = "te.microsoft", + /** Microsoft analyzer for Thai. */ + ThMicrosoft = "th.microsoft", + /** Lucene analyzer for Thai. */ + ThLucene = "th.lucene", + /** Microsoft analyzer for Turkish. */ + TrMicrosoft = "tr.microsoft", + /** Lucene analyzer for Turkish. */ + TrLucene = "tr.lucene", + /** Microsoft analyzer for Ukrainian. */ + UkMicrosoft = "uk.microsoft", + /** Microsoft analyzer for Urdu. */ + UrMicrosoft = "ur.microsoft", + /** Microsoft analyzer for Vietnamese. */ + ViMicrosoft = "vi.microsoft", + /** Standard Lucene analyzer. 
*/ + StandardLucene = "standard.lucene", + /** Standard ASCII Folding Lucene analyzer. See https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#Analyzers */ + StandardAsciiFoldingLucene = "standardasciifolding.lucene", + /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordAnalyzer.html */ + Keyword = "keyword", + /** Flexibly separates text into terms via a regular expression pattern. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.html */ + Pattern = "pattern", + /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/SimpleAnalyzer.html */ + Simple = "simple", + /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html */ + Stop = "stop", + /** An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html */ + Whitespace = "whitespace", +} + +/** + * Defines the names of all text analyzers supported by the search engine. \ + * {@link KnownLexicalAnalyzerName} can be used interchangeably with LexicalAnalyzerName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **ar.microsoft**: Microsoft analyzer for Arabic. \ + * **ar.lucene**: Lucene analyzer for Arabic. \ + * **hy.lucene**: Lucene analyzer for Armenian. \ + * **bn.microsoft**: Microsoft analyzer for Bangla. \ + * **eu.lucene**: Lucene analyzer for Basque. \ + * **bg.microsoft**: Microsoft analyzer for Bulgarian. \ + * **bg.lucene**: Lucene analyzer for Bulgarian. \ + * **ca.microsoft**: Microsoft analyzer for Catalan. \ + * **ca.lucene**: Lucene analyzer for Catalan. \ + * **zh-Hans.microsoft**: Microsoft analyzer for Chinese (Simplified). \ + * **zh-Hans.lucene**: Lucene analyzer for Chinese (Simplified). \ + * **zh-Hant.microsoft**: Microsoft analyzer for Chinese (Traditional). \ + * **zh-Hant.lucene**: Lucene analyzer for Chinese (Traditional). \ + * **hr.microsoft**: Microsoft analyzer for Croatian. \ + * **cs.microsoft**: Microsoft analyzer for Czech. \ + * **cs.lucene**: Lucene analyzer for Czech. \ + * **da.microsoft**: Microsoft analyzer for Danish. \ + * **da.lucene**: Lucene analyzer for Danish. \ + * **nl.microsoft**: Microsoft analyzer for Dutch. \ + * **nl.lucene**: Lucene analyzer for Dutch. \ + * **en.microsoft**: Microsoft analyzer for English. \ + * **en.lucene**: Lucene analyzer for English. \ + * **et.microsoft**: Microsoft analyzer for Estonian. \ + * **fi.microsoft**: Microsoft analyzer for Finnish. \ + * **fi.lucene**: Lucene analyzer for Finnish. \ + * **fr.microsoft**: Microsoft analyzer for French. \ + * **fr.lucene**: Lucene analyzer for French. \ + * **gl.lucene**: Lucene analyzer for Galician. \ + * **de.microsoft**: Microsoft analyzer for German. \ + * **de.lucene**: Lucene analyzer for German. \ + * **el.microsoft**: Microsoft analyzer for Greek. \ + * **el.lucene**: Lucene analyzer for Greek. \ + * **gu.microsoft**: Microsoft analyzer for Gujarati. \ + * **he.microsoft**: Microsoft analyzer for Hebrew. 
\ + * **hi.microsoft**: Microsoft analyzer for Hindi. \ + * **hi.lucene**: Lucene analyzer for Hindi. \ + * **hu.microsoft**: Microsoft analyzer for Hungarian. \ + * **hu.lucene**: Lucene analyzer for Hungarian. \ + * **is.microsoft**: Microsoft analyzer for Icelandic. \ + * **id.microsoft**: Microsoft analyzer for Indonesian (Bahasa). \ + * **id.lucene**: Lucene analyzer for Indonesian. \ + * **ga.lucene**: Lucene analyzer for Irish. \ + * **it.microsoft**: Microsoft analyzer for Italian. \ + * **it.lucene**: Lucene analyzer for Italian. \ + * **ja.microsoft**: Microsoft analyzer for Japanese. \ + * **ja.lucene**: Lucene analyzer for Japanese. \ + * **kn.microsoft**: Microsoft analyzer for Kannada. \ + * **ko.microsoft**: Microsoft analyzer for Korean. \ + * **ko.lucene**: Lucene analyzer for Korean. \ + * **lv.microsoft**: Microsoft analyzer for Latvian. \ + * **lv.lucene**: Lucene analyzer for Latvian. \ + * **lt.microsoft**: Microsoft analyzer for Lithuanian. \ + * **ml.microsoft**: Microsoft analyzer for Malayalam. \ + * **ms.microsoft**: Microsoft analyzer for Malay (Latin). \ + * **mr.microsoft**: Microsoft analyzer for Marathi. \ + * **nb.microsoft**: Microsoft analyzer for Norwegian (Bokmål). \ + * **no.lucene**: Lucene analyzer for Norwegian. \ + * **fa.lucene**: Lucene analyzer for Persian. \ + * **pl.microsoft**: Microsoft analyzer for Polish. \ + * **pl.lucene**: Lucene analyzer for Polish. \ + * **pt-BR.microsoft**: Microsoft analyzer for Portuguese (Brazil). \ + * **pt-BR.lucene**: Lucene analyzer for Portuguese (Brazil). \ + * **pt-PT.microsoft**: Microsoft analyzer for Portuguese (Portugal). \ + * **pt-PT.lucene**: Lucene analyzer for Portuguese (Portugal). \ + * **pa.microsoft**: Microsoft analyzer for Punjabi. \ + * **ro.microsoft**: Microsoft analyzer for Romanian. \ + * **ro.lucene**: Lucene analyzer for Romanian. \ + * **ru.microsoft**: Microsoft analyzer for Russian. \ + * **ru.lucene**: Lucene analyzer for Russian. \ + * **sr-cyrillic.microsoft**: Microsoft analyzer for Serbian (Cyrillic). \ + * **sr-latin.microsoft**: Microsoft analyzer for Serbian (Latin). \ + * **sk.microsoft**: Microsoft analyzer for Slovak. \ + * **sl.microsoft**: Microsoft analyzer for Slovenian. \ + * **es.microsoft**: Microsoft analyzer for Spanish. \ + * **es.lucene**: Lucene analyzer for Spanish. \ + * **sv.microsoft**: Microsoft analyzer for Swedish. \ + * **sv.lucene**: Lucene analyzer for Swedish. \ + * **ta.microsoft**: Microsoft analyzer for Tamil. \ + * **te.microsoft**: Microsoft analyzer for Telugu. \ + * **th.microsoft**: Microsoft analyzer for Thai. \ + * **th.lucene**: Lucene analyzer for Thai. \ + * **tr.microsoft**: Microsoft analyzer for Turkish. \ + * **tr.lucene**: Lucene analyzer for Turkish. \ + * **uk.microsoft**: Microsoft analyzer for Ukrainian. \ + * **ur.microsoft**: Microsoft analyzer for Urdu. \ + * **vi.microsoft**: Microsoft analyzer for Vietnamese. \ + * **standard.lucene**: Standard Lucene analyzer. \ + * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers \ + * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html \ + * **pattern**: Flexibly separates text into terms via a regular expression pattern. 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html \ + * **simple**: Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html \ + * **stop**: Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html \ + * **whitespace**: An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html + */ +export type LexicalAnalyzerName = string; + +/** Defines the names of all text normalizers supported by the search engine. */ +export enum KnownLexicalNormalizerName { + /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */ + AsciiFolding = "asciifolding", + /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */ + Elision = "elision", + /** Normalizes token text to lowercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */ + Lowercase = "lowercase", + /** Standard normalizer, which consists of lowercase and asciifolding. */ + Standard = "standard", + /** Normalizes token text to uppercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */ + Uppercase = "uppercase", +} + +/** + * Defines the names of all text normalizers supported by the search engine. \ + * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \ + * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \ + * **lowercase**: Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \ + * **standard**: Standard normalizer, which consists of lowercase and asciifolding. \ + * **uppercase**: Normalizes token text to uppercase.
See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html + */ +export type LexicalNormalizerName = string; + +/** The encoding format for interpreting vector field contents. */ +export enum KnownVectorEncodingFormat { + /** Encoding format representing bits packed into a wider data type. */ + PackedBit = "packedBit", +} + +/** + * The encoding format for interpreting vector field contents. \ + * {@link KnownVectorEncodingFormat} can be used interchangeably with VectorEncodingFormat, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **packedBit**: Encoding format representing bits packed into a wider data type. + */ +export type VectorEncodingFormat = string; + +export function scoringProfileArraySerializer(result: Array<ScoringProfile>): any[] { + return result.map((item) => { + return scoringProfileSerializer(item); + }); +} + +export function scoringProfileArrayDeserializer(result: Array<ScoringProfile>): any[] { + return result.map((item) => { + return scoringProfileDeserializer(item); + }); +} + +/** Defines parameters for a search index that influence scoring in search queries. */ +export interface ScoringProfile { + /** The name of the scoring profile. */ + name: string; + /** Parameters that boost scoring based on text matches in certain index fields. */ + textWeights?: TextWeights; + /** The collection of functions that influence the scoring of documents. */ + functions?: ScoringFunctionUnion[]; + /** A value indicating how the results of individual scoring functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. */ + functionAggregation?: ScoringFunctionAggregation; +} + +export function scoringProfileSerializer(item: ScoringProfile): any { + return { + name: item["name"], + text: !item["textWeights"] ? item["textWeights"] : textWeightsSerializer(item["textWeights"]), + functions: !item["functions"] + ? item["functions"] + : scoringFunctionUnionArraySerializer(item["functions"]), + functionAggregation: item["functionAggregation"], + }; +} + +export function scoringProfileDeserializer(item: any): ScoringProfile { + return { + name: item["name"], + textWeights: !item["text"] ? item["text"] : textWeightsDeserializer(item["text"]), + functions: !item["functions"] + ? item["functions"] + : scoringFunctionUnionArrayDeserializer(item["functions"]), + functionAggregation: item["functionAggregation"], + }; +} + +/** Defines weights on index fields for which matches should boost scoring in search queries. */ +export interface TextWeights { + /** The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. */ + weights: Record<string, number>; +} + +export function textWeightsSerializer(item: TextWeights): any { + return { weights: item["weights"] }; +} + +export function textWeightsDeserializer(item: any): TextWeights { + return { + weights: item["weights"], + }; +} + +export function scoringFunctionUnionArraySerializer(result: Array<ScoringFunctionUnion>): any[] { + return result.map((item) => { + return scoringFunctionUnionSerializer(item); + }); +} + +export function scoringFunctionUnionArrayDeserializer(result: Array<ScoringFunctionUnion>): any[] { + return result.map((item) => { + return scoringFunctionUnionDeserializer(item); + }); +} + +/** Base type for functions that can modify document scores during ranking. */ +export interface ScoringFunction { + /** The name of the field used as input to the scoring function.
*/ + fieldName: string; + /** A multiplier for the raw score. Must be a positive number not equal to 1.0. */ + boost: number; + /** A value indicating how boosting will be interpolated across document scores; defaults to "Linear". */ + interpolation?: ScoringFunctionInterpolation; + /** Type of ScoringFunction. */ + /** The discriminator possible values: distance, freshness, magnitude, tag */ + type: string; +} + +export function scoringFunctionSerializer(item: ScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + }; +} + +export function scoringFunctionDeserializer(item: any): ScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + }; +} + +/** Alias for ScoringFunctionUnion */ +export type ScoringFunctionUnion = + | DistanceScoringFunction + | FreshnessScoringFunction + | MagnitudeScoringFunction + | TagScoringFunction + | ScoringFunction; + +export function scoringFunctionUnionSerializer(item: ScoringFunctionUnion): any { + switch (item.type) { + case "distance": + return distanceScoringFunctionSerializer(item as DistanceScoringFunction); + + case "freshness": + return freshnessScoringFunctionSerializer(item as FreshnessScoringFunction); + + case "magnitude": + return magnitudeScoringFunctionSerializer(item as MagnitudeScoringFunction); + + case "tag": + return tagScoringFunctionSerializer(item as TagScoringFunction); + + default: + return scoringFunctionSerializer(item); + } +} + +export function scoringFunctionUnionDeserializer(item: any): ScoringFunctionUnion { + switch (item.type) { + case "distance": + return distanceScoringFunctionDeserializer(item as DistanceScoringFunction); + + case "freshness": + return freshnessScoringFunctionDeserializer(item as FreshnessScoringFunction); + + case "magnitude": + return magnitudeScoringFunctionDeserializer(item as MagnitudeScoringFunction); + + case "tag": + return tagScoringFunctionDeserializer(item as TagScoringFunction); + + default: + return scoringFunctionDeserializer(item); + } +} + +/** Defines the function used to interpolate score boosting across a range of documents. */ +export enum KnownScoringFunctionInterpolation { + /** Boosts scores by a linearly decreasing amount. This is the default interpolation for scoring functions. */ + Linear = "linear", + /** Boosts scores by a constant factor. */ + Constant = "constant", + /** Boosts scores by an amount that decreases quadratically. Boosts decrease slowly for higher scores, and more quickly as the scores decrease. This interpolation option is not allowed in tag scoring functions. */ + Quadratic = "quadratic", + /** Boosts scores by an amount that decreases logarithmically. Boosts decrease quickly for higher scores, and more slowly as the scores decrease. This interpolation option is not allowed in tag scoring functions. */ + Logarithmic = "logarithmic", +} + +/** + * Defines the function used to interpolate score boosting across a range of documents. \ + * {@link KnownScoringFunctionInterpolation} can be used interchangeably with ScoringFunctionInterpolation, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **linear**: Boosts scores by a linearly decreasing amount. This is the default interpolation for scoring functions. \ + * **constant**: Boosts scores by a constant factor. 
\ + * **quadratic**: Boosts scores by an amount that decreases quadratically. Boosts decrease slowly for higher scores, and more quickly as the scores decrease. This interpolation option is not allowed in tag scoring functions. \ + * **logarithmic**: Boosts scores by an amount that decreases logarithmically. Boosts decrease quickly for higher scores, and more slowly as the scores decrease. This interpolation option is not allowed in tag scoring functions. + */ +export type ScoringFunctionInterpolation = string; + +/** Defines a function that boosts scores based on distance from a geographic location. */ +export interface DistanceScoringFunction extends ScoringFunction { + /** Parameter values for the distance scoring function. */ + parameters: DistanceScoringParameters; + /** Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. */ + type: "distance"; +} + +export function distanceScoringFunctionSerializer(item: DistanceScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + distance: distanceScoringParametersSerializer(item["parameters"]), + }; +} + +export function distanceScoringFunctionDeserializer(item: any): DistanceScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + parameters: distanceScoringParametersDeserializer(item["distance"]), + }; +} + +/** Provides parameter values to a distance scoring function. */ +export interface DistanceScoringParameters { + /** The name of the parameter passed in search queries to specify the reference location. */ + referencePointParameter: string; + /** The distance in kilometers from the reference location where the boosting range ends. */ + boostingDistance: number; +} + +export function distanceScoringParametersSerializer(item: DistanceScoringParameters): any { + return { + referencePointParameter: item["referencePointParameter"], + boostingDistance: item["boostingDistance"], + }; +} + +export function distanceScoringParametersDeserializer(item: any): DistanceScoringParameters { + return { + referencePointParameter: item["referencePointParameter"], + boostingDistance: item["boostingDistance"], + }; +} + +/** Defines a function that boosts scores based on the value of a date-time field. */ +export interface FreshnessScoringFunction extends ScoringFunction { + /** Parameter values for the freshness scoring function. */ + parameters: FreshnessScoringParameters; + /** Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. */ + type: "freshness"; +} + +export function freshnessScoringFunctionSerializer(item: FreshnessScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + freshness: freshnessScoringParametersSerializer(item["parameters"]), + }; +} + +export function freshnessScoringFunctionDeserializer(item: any): FreshnessScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + parameters: freshnessScoringParametersDeserializer(item["freshness"]), + }; +} + +/** Provides parameter values to a freshness scoring function. 
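A usage sketch (assuming the surrounding declarations are in scope; the field and parameter names are hypothetical): the union serializer dispatches on the `type` discriminator and nests the parameters under the function-specific wire key.

const byDistance: DistanceScoringFunction = {
  type: "distance",
  fieldName: "location",
  boost: 1.5,
  parameters: {
    referencePointParameter: "deliveryPoint", // hypothetical query-time parameter name
    boostingDistance: 10, // kilometers from the reference point where boosting ends
  },
};
// The "distance" case routes to distanceScoringFunctionSerializer, which emits the
// parameters under the wire key "distance" rather than "parameters".
const distanceWire = scoringFunctionUnionSerializer(byDistance);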
*/ +export interface FreshnessScoringParameters { + /** The expiration period after which boosting will stop for a particular document. */ + boostingDuration: string; +} + +export function freshnessScoringParametersSerializer(item: FreshnessScoringParameters): any { + return { boostingDuration: item["boostingDuration"] }; +} + +export function freshnessScoringParametersDeserializer(item: any): FreshnessScoringParameters { + return { + boostingDuration: item["boostingDuration"], + }; +} + +/** Defines a function that boosts scores based on the magnitude of a numeric field. */ +export interface MagnitudeScoringFunction extends ScoringFunction { + /** Parameter values for the magnitude scoring function. */ + parameters: MagnitudeScoringParameters; + /** Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. */ + type: "magnitude"; +} + +export function magnitudeScoringFunctionSerializer(item: MagnitudeScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + magnitude: magnitudeScoringParametersSerializer(item["parameters"]), + }; +} + +export function magnitudeScoringFunctionDeserializer(item: any): MagnitudeScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + parameters: magnitudeScoringParametersDeserializer(item["magnitude"]), + }; +} + +/** Provides parameter values to a magnitude scoring function. */ +export interface MagnitudeScoringParameters { + /** The field value at which boosting starts. */ + boostingRangeStart: number; + /** The field value at which boosting ends. */ + boostingRangeEnd: number; + /** A value indicating whether to apply a constant boost for field values beyond the range end value; default is false. */ + shouldBoostBeyondRangeByConstant?: boolean; +} + +export function magnitudeScoringParametersSerializer(item: MagnitudeScoringParameters): any { + return { + boostingRangeStart: item["boostingRangeStart"], + boostingRangeEnd: item["boostingRangeEnd"], + constantBoostBeyondRange: item["shouldBoostBeyondRangeByConstant"], + }; +} + +export function magnitudeScoringParametersDeserializer(item: any): MagnitudeScoringParameters { + return { + boostingRangeStart: item["boostingRangeStart"], + boostingRangeEnd: item["boostingRangeEnd"], + shouldBoostBeyondRangeByConstant: item["constantBoostBeyondRange"], + }; +} + +/** Defines a function that boosts scores of documents with string values matching a given list of tags. */ +export interface TagScoringFunction extends ScoringFunction { + /** Parameter values for the tag scoring function. */ + parameters: TagScoringParameters; + /** Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. 
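A round-trip sketch (same assumption of in-scope declarations) of the one property that is renamed rather than just nested on the wire, shouldBoostBeyondRangeByConstant / constantBoostBeyondRange:

const byMagnitude: MagnitudeScoringFunction = {
  type: "magnitude",
  fieldName: "rating", // hypothetical numeric field
  boost: 2,
  parameters: {
    boostingRangeStart: 3,
    boostingRangeEnd: 5,
    shouldBoostBeyondRangeByConstant: true,
  },
};
const magnitudeWire = magnitudeScoringFunctionSerializer(byMagnitude);
// magnitudeWire.magnitude.constantBoostBeyondRange === true
const magnitudeBack = magnitudeScoringFunctionDeserializer(magnitudeWire);
// magnitudeBack.parameters.shouldBoostBeyondRangeByConstant === true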
*/ + type: "tag"; +} + +export function tagScoringFunctionSerializer(item: TagScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + tag: tagScoringParametersSerializer(item["parameters"]), + }; +} + +export function tagScoringFunctionDeserializer(item: any): TagScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + parameters: tagScoringParametersDeserializer(item["tag"]), + }; +} + +/** Provides parameter values to a tag scoring function. */ +export interface TagScoringParameters { + /** The name of the parameter passed in search queries to specify the list of tags to compare against the target field. */ + tagsParameter: string; +} + +export function tagScoringParametersSerializer(item: TagScoringParameters): any { + return { tagsParameter: item["tagsParameter"] }; +} + +export function tagScoringParametersDeserializer(item: any): TagScoringParameters { + return { + tagsParameter: item["tagsParameter"], + }; +} + +/** Defines the aggregation function used to combine the results of all the scoring functions in a scoring profile. */ +export enum KnownScoringFunctionAggregation { + /** Boost scores by the sum of all scoring function results. */ + Sum = "sum", + /** Boost scores by the average of all scoring function results. */ + Average = "average", + /** Boost scores by the minimum of all scoring function results. */ + Minimum = "minimum", + /** Boost scores by the maximum of all scoring function results. */ + Maximum = "maximum", + /** Boost scores using the first applicable scoring function in the scoring profile. */ + FirstMatching = "firstMatching", +} + +/** + * Defines the aggregation function used to combine the results of all the scoring functions in a scoring profile. \ + * {@link KnownScoringFunctionAggregation} can be used interchangeably with ScoringFunctionAggregation, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **sum**: Boost scores by the sum of all scoring function results. \ + * **average**: Boost scores by the average of all scoring function results. \ + * **minimum**: Boost scores by the minimum of all scoring function results. \ + * **maximum**: Boost scores by the maximum of all scoring function results. \ + * **firstMatching**: Boost scores using the first applicable scoring function in the scoring profile. + */ +export type ScoringFunctionAggregation = string; + +/** Defines options to control Cross-Origin Resource Sharing (CORS) for an index. */ +export interface CorsOptions { + /** The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). */ + allowedOrigins: string[]; + /** The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. 
*/ + maxAgeInSeconds?: number; +} + +export function corsOptionsSerializer(item: CorsOptions): any { + return { + allowedOrigins: item["allowedOrigins"].map((p: any) => { + return p; + }), + maxAgeInSeconds: item["maxAgeInSeconds"], + }; +} + +export function corsOptionsDeserializer(item: any): CorsOptions { + return { + allowedOrigins: item["allowedOrigins"].map((p: any) => { + return p; + }), + maxAgeInSeconds: item["maxAgeInSeconds"], + }; +} + +export function searchSuggesterArraySerializer(result: Array<SearchSuggester>): any[] { + return result.map((item) => { + return searchSuggesterSerializer(item); + }); +} + +export function searchSuggesterArrayDeserializer(result: Array<SearchSuggester>): any[] { + return result.map((item) => { + return searchSuggesterDeserializer(item); + }); +} + +/** Defines how the Suggest API should apply to a group of fields in the index. */ +export interface SearchSuggester { + /** The name of the suggester. */ + name: string; + /** A value indicating the capabilities of the suggester. */ + searchMode: "analyzingInfixMatching"; + /** The list of field names to which the suggester applies. Each field must be searchable. */ + sourceFields: string[]; +} + +export function searchSuggesterSerializer(item: SearchSuggester): any { + return { + name: item["name"], + searchMode: item["searchMode"], + sourceFields: item["sourceFields"].map((p: any) => { + return p; + }), + }; +} + +export function searchSuggesterDeserializer(item: any): SearchSuggester { + return { + name: item["name"], + searchMode: item["searchMode"], + sourceFields: item["sourceFields"].map((p: any) => { + return p; + }), + }; +} + +export function lexicalAnalyzerUnionArraySerializer(result: Array<LexicalAnalyzerUnion>): any[] { + return result.map((item) => { + return lexicalAnalyzerUnionSerializer(item); + }); +} + +export function lexicalAnalyzerUnionArrayDeserializer(result: Array<LexicalAnalyzerUnion>): any[] { + return result.map((item) => { + return lexicalAnalyzerUnionDeserializer(item); + }); +} + +/** Base type for analyzers. */ +export interface LexicalAnalyzer { + /** The discriminator for derived types. */ + /** The discriminator possible values: #Microsoft.Azure.Search.CustomAnalyzer, #Microsoft.Azure.Search.PatternAnalyzer, #Microsoft.Azure.Search.StandardAnalyzer, #Microsoft.Azure.Search.StopAnalyzer */ + odatatype: string; + /** The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters.
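A brief sketch of these two index-level options (origin and field names hypothetical):

const cors: CorsOptions = {
  allowedOrigins: ["https://app.contoso.example"], // prefer explicit origins over "*"
  maxAgeInSeconds: 300,
};
const suggester: SearchSuggester = {
  name: "sg",
  searchMode: "analyzingInfixMatching", // the only mode the type admits
  sourceFields: ["hotelName", "category"], // each must be a searchable field
};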
*/ + name: string; +} + +export function lexicalAnalyzerSerializer(item: LexicalAnalyzer): any { + return { "@odata.type": item["odatatype"], name: item["name"] }; +} + +export function lexicalAnalyzerDeserializer(item: any): LexicalAnalyzer { + return { + odatatype: item["@odata.type"], + name: item["name"], + }; +} + +/** Alias for LexicalAnalyzerUnion */ +export type LexicalAnalyzerUnion = + | CustomAnalyzer + | PatternAnalyzer + | LuceneStandardAnalyzer + | StopAnalyzer + | LexicalAnalyzer; + +export function lexicalAnalyzerUnionSerializer(item: LexicalAnalyzerUnion): any { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.CustomAnalyzer": + return customAnalyzerSerializer(item as CustomAnalyzer); + + case "#Microsoft.Azure.Search.PatternAnalyzer": + return patternAnalyzerSerializer(item as PatternAnalyzer); + + case "#Microsoft.Azure.Search.StandardAnalyzer": + return luceneStandardAnalyzerSerializer(item as LuceneStandardAnalyzer); + + case "#Microsoft.Azure.Search.StopAnalyzer": + return stopAnalyzerSerializer(item as StopAnalyzer); + + default: + return lexicalAnalyzerSerializer(item); + } +} + +export function lexicalAnalyzerUnionDeserializer(item: any): LexicalAnalyzerUnion { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.CustomAnalyzer": + return customAnalyzerDeserializer(item as CustomAnalyzer); + + case "#Microsoft.Azure.Search.PatternAnalyzer": + return patternAnalyzerDeserializer(item as PatternAnalyzer); + + case "#Microsoft.Azure.Search.StandardAnalyzer": + return luceneStandardAnalyzerDeserializer(item as LuceneStandardAnalyzer); + + case "#Microsoft.Azure.Search.StopAnalyzer": + return stopAnalyzerDeserializer(item as StopAnalyzer); + + default: + return lexicalAnalyzerDeserializer(item); + } +} + +/** Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. */ +export interface CustomAnalyzer extends LexicalAnalyzer { + /** The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. */ + tokenizer: LexicalTokenizerName; + /** A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */ + tokenFilters?: TokenFilterName[]; + /** A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */ + charFilters?: CharFilterName[]; + /** A URI fragment specifying the type of analyzer. */ + odatatype: "#Microsoft.Azure.Search.CustomAnalyzer"; +} + +export function customAnalyzerSerializer(item: CustomAnalyzer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + tokenizer: item["tokenizer"], + tokenFilters: !item["tokenFilters"] + ? item["tokenFilters"] + : item["tokenFilters"].map((p: any) => { + return p; + }), + charFilters: !item["charFilters"] + ? 
item["charFilters"] + : item["charFilters"].map((p: any) => { + return p; + }), + }; +} + +export function customAnalyzerDeserializer(item: any): CustomAnalyzer { + return { + odatatype: item["@odata.type"], + name: item["name"], + tokenizer: item["tokenizer"], + tokenFilters: !item["tokenFilters"] + ? item["tokenFilters"] + : item["tokenFilters"].map((p: any) => { + return p; + }), + charFilters: !item["charFilters"] + ? item["charFilters"] + : item["charFilters"].map((p: any) => { + return p; + }), + }; +} + +/** Defines the names of all tokenizers supported by the search engine. */ +export enum KnownLexicalTokenizerName { + /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html */ + Classic = "classic", + /** Tokenizes the input from an edge into n-grams of the given size(s). See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html */ + EdgeNGram = "edgeNGram", + /** Emits the entire input as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html */ + Keyword = "keyword_v2", + /** Divides text at non-letters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html */ + Letter = "letter", + /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html */ + Lowercase = "lowercase", + /** Divides text using language-specific rules. */ + MicrosoftLanguageTokenizer = "microsoft_language_tokenizer", + /** Divides text using language-specific rules and reduces words to their base forms. */ + MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer", + /** Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html */ + NGram = "nGram", + /** Tokenizer for path-like hierarchies. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html */ + PathHierarchy = "path_hierarchy_v2", + /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html */ + Pattern = "pattern", + /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html */ + Standard = "standard_v2", + /** Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html */ + UaxUrlEmail = "uax_url_email", + /** Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html */ + Whitespace = "whitespace", +} + +/** + * Defines the names of all tokenizers supported by the search engine. \ + * {@link KnownLexicalTokenizerName} can be used interchangeably with LexicalTokenizerName, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service + * **classic**: Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html \ + * **edgeNGram**: Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html \ + * **keyword_v2**: Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html \ + * **letter**: Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html \ + * **lowercase**: Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html \ + * **microsoft_language_tokenizer**: Divides text using language-specific rules. \ + * **microsoft_language_stemming_tokenizer**: Divides text using language-specific rules and reduces words to their base forms. \ + * **nGram**: Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html \ + * **path_hierarchy_v2**: Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html \ + * **pattern**: Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html \ + * **standard_v2**: Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html \ + * **uax_url_email**: Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html \ + * **whitespace**: Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html + */ +export type LexicalTokenizerName = string; + +/** Defines the names of all token filters supported by the search engine. */ +export enum KnownTokenFilterName { + /** A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html */ + ArabicNormalization = "arabic_normalization", + /** Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html */ + Apostrophe = "apostrophe", + /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */ + AsciiFolding = "asciifolding", + /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html */ + CjkBigram = "cjk_bigram", + /** Normalizes CJK width differences. Folds full-width ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html */ + CjkWidth = "cjk_width", + /** Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html */ + Classic = "classic", + /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html */ + CommonGram = "common_grams", + /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html */ + EdgeNGram = "edgeNGram_v2", + /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */ + Elision = "elision", + /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html */ + GermanNormalization = "german_normalization", + /** Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html */ + HindiNormalization = "hindi_normalization", + /** Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html */ + IndicNormalization = "indic_normalization", + /** Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html */ + KeywordRepeat = "keyword_repeat", + /** A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html */ + KStem = "kstem", + /** Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html */ + Length = "length", + /** Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html */ + Limit = "limit", + /** Normalizes token text to lower case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */ + Lowercase = "lowercase", + /** Generates n-grams of the given size(s). 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html */ + NGram = "nGram_v2", + /** Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html */ + PersianNormalization = "persian_normalization", + /** Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html */ + Phonetic = "phonetic", + /** Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer */ + PorterStem = "porter_stem", + /** Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */ + Reverse = "reverse", + /** Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html */ + ScandinavianNormalization = "scandinavian_normalization", + /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html */ + ScandinavianFoldingNormalization = "scandinavian_folding", + /** Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html */ + Shingle = "shingle", + /** A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html */ + Snowball = "snowball", + /** Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html */ + SoraniNormalization = "sorani_normalization", + /** Language specific stemming filter. See https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters */ + Stemmer = "stemmer", + /** Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html */ + Stopwords = "stopwords", + /** Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html */ + Trim = "trim", + /** Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html */ + Truncate = "truncate", + /** Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html */ + Unique = "unique", + /** Normalizes token text to upper case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */ + Uppercase = "uppercase", + /** Splits words into subwords and performs optional transformations on subword groups. */ + WordDelimiter = "word_delimiter", +} + +/** + * Defines the names of all token filters supported by the search engine. 
\ + * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html \ + * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html \ + * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \ + * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html \ + * **cjk_width**: Normalizes CJK width differences. Folds full-width ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html \ + * **classic**: Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html \ + * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html \ + * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html \ + * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \ + * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html \ + * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html \ + * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html \ + * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html \ + * **kstem**: A high-performance kstem filter for English. 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html \ + * **length**: Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html \ + * **limit**: Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html \ + * **lowercase**: Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \ + * **nGram_v2**: Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html \ + * **persian_normalization**: Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html \ + * **phonetic**: Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html \ + * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer \ + * **reverse**: Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \ + * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html \ + * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html \ + * **shingle**: Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html \ + * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html \ + * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html \ + * **stemmer**: Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters \ + * **stopwords**: Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html \ + * **trim**: Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html \ + * **truncate**: Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html \ + * **unique**: Filters out tokens with same text as the previous token. 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html \ + * **uppercase**: Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html \ + * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups. + */ +export type TokenFilterName = string; + +/** Defines the names of all character filters supported by the search engine. */ +export enum KnownCharFilterName { + /** A character filter that attempts to strip out HTML constructs. See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html */ + HtmlStrip = "html_strip", +} + +/** + * Defines the names of all character filters supported by the search engine. \ + * {@link KnownCharFilterName} can be used interchangeably with CharFilterName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **html_strip**: A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html + */ +export type CharFilterName = string; + +/** Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. */ +export interface PatternAnalyzer extends LexicalAnalyzer { + /** A value indicating whether terms should be lower-cased. Default is true. */ + lowerCaseTerms?: boolean; + /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */ + pattern?: string; + /** Regular expression flags. */ + flags?: RegexFlags; + /** A list of stopwords. */ + stopwords?: string[]; + /** A URI fragment specifying the type of analyzer. */ + odatatype: "#Microsoft.Azure.Search.PatternAnalyzer"; +} + +export function patternAnalyzerSerializer(item: PatternAnalyzer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + lowercase: item["lowerCaseTerms"], + pattern: item["pattern"], + flags: item["flags"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + }; +} + +export function patternAnalyzerDeserializer(item: any): PatternAnalyzer { + return { + odatatype: item["@odata.type"], + name: item["name"], + lowerCaseTerms: item["lowercase"], + pattern: item["pattern"], + flags: item["flags"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + }; +} + +/** Defines flags that can be combined to control how regular expressions are used in the pattern analyzer and pattern tokenizer. */ +export enum KnownRegexFlags { + /** Enables canonical equivalence. */ + CanonEq = "CANON_EQ", + /** Enables case-insensitive matching. */ + CaseInsensitive = "CASE_INSENSITIVE", + /** Permits whitespace and comments in the pattern. */ + Comments = "COMMENTS", + /** Enables dotall mode. */ + DotAll = "DOTALL", + /** Enables literal parsing of the pattern. */ + Literal = "LITERAL", + /** Enables multiline mode. */ + Multiline = "MULTILINE", + /** Enables Unicode-aware case folding. */ + UnicodeCase = "UNICODE_CASE", + /** Enables Unix lines mode. 
*/ + UnixLines = "UNIX_LINES", +} + +/** + * Defines flags that can be combined to control how regular expressions are used in the pattern analyzer and pattern tokenizer. \ + * {@link KnownRegexFlags} can be used interchangeably with RegexFlags, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **CANON_EQ**: Enables canonical equivalence. \ + * **CASE_INSENSITIVE**: Enables case-insensitive matching. \ + * **COMMENTS**: Permits whitespace and comments in the pattern. \ + * **DOTALL**: Enables dotall mode. \ + * **LITERAL**: Enables literal parsing of the pattern. \ + * **MULTILINE**: Enables multiline mode. \ + * **UNICODE_CASE**: Enables Unicode-aware case folding. \ + * **UNIX_LINES**: Enables Unix lines mode. + */ +export type RegexFlags = string; + +/** Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. */ +export interface LuceneStandardAnalyzer extends LexicalAnalyzer { + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; + /** A list of stopwords. */ + stopwords?: string[]; + /** A URI fragment specifying the type of analyzer. */ + odatatype: "#Microsoft.Azure.Search.StandardAnalyzer"; +} + +export function luceneStandardAnalyzerSerializer(item: LuceneStandardAnalyzer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + maxTokenLength: item["maxTokenLength"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + }; +} + +export function luceneStandardAnalyzerDeserializer(item: any): LuceneStandardAnalyzer { + return { + odatatype: item["@odata.type"], + name: item["name"], + maxTokenLength: item["maxTokenLength"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + }; +} + +/** Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. */ +export interface StopAnalyzer extends LexicalAnalyzer { + /** A list of stopwords. */ + stopwords?: string[]; + /** A URI fragment specifying the type of analyzer. */ + odatatype: "#Microsoft.Azure.Search.StopAnalyzer"; +} + +export function stopAnalyzerSerializer(item: StopAnalyzer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + }; +} + +export function stopAnalyzerDeserializer(item: any): StopAnalyzer { + return { + odatatype: item["@odata.type"], + name: item["name"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + }; +} + +export function lexicalTokenizerUnionArraySerializer(result: Array<LexicalTokenizerUnion>): any[] { + return result.map((item) => { + return lexicalTokenizerUnionSerializer(item); + }); +} + +export function lexicalTokenizerUnionArrayDeserializer( + result: Array<LexicalTokenizerUnion>, +): any[] { + return result.map((item) => { + return lexicalTokenizerUnionDeserializer(item); + }); +} + +/** Base type for tokenizers. */ +export interface LexicalTokenizer { + /** The discriminator for derived types.
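A sketch of the pattern analyzer, splitting tokens on commas (names illustrative); note the client-to-wire rename lowerCaseTerms -> "lowercase":

const csvAnalyzer: PatternAnalyzer = {
  odatatype: "#Microsoft.Azure.Search.PatternAnalyzer",
  name: "csv_analyzer", // hypothetical name
  lowerCaseTerms: true,
  pattern: ",", // overrides the default non-word-character pattern
  flags: "CASE_INSENSITIVE", // a KnownRegexFlags value, illustrative here
};
const patternWire = patternAnalyzerSerializer(csvAnalyzer);
// patternWire.lowercase === true; patternWire["@odata.type"] carries the discriminator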
*/ + /** The discriminator possible values: #Microsoft.Azure.Search.ClassicTokenizer, #Microsoft.Azure.Search.EdgeNGramTokenizer, #Microsoft.Azure.Search.KeywordTokenizerV2, #Microsoft.Azure.Search.MicrosoftLanguageTokenizer, #Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer, #Microsoft.Azure.Search.NGramTokenizer, #Microsoft.Azure.Search.PathHierarchyTokenizerV2, #Microsoft.Azure.Search.PatternTokenizer, #Microsoft.Azure.Search.StandardTokenizerV2, #Microsoft.Azure.Search.UaxUrlEmailTokenizer */ + odatatype: string; + /** The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */ + name: string; +} + +export function lexicalTokenizerSerializer(item: LexicalTokenizer): any { + return { "@odata.type": item["odatatype"], name: item["name"] }; +} + +export function lexicalTokenizerDeserializer(item: any): LexicalTokenizer { + return { + odatatype: item["@odata.type"], + name: item["name"], + }; +} + +/** Alias for LexicalTokenizerUnion */ +export type LexicalTokenizerUnion = + | ClassicTokenizer + | EdgeNGramTokenizer + | KeywordTokenizer + | MicrosoftLanguageTokenizer + | MicrosoftLanguageStemmingTokenizer + | NGramTokenizer + | PathHierarchyTokenizer + | PatternTokenizer + | LuceneStandardTokenizer + | UaxUrlEmailTokenizer + | LexicalTokenizer; + +export function lexicalTokenizerUnionSerializer(item: LexicalTokenizerUnion): any { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.ClassicTokenizer": + return classicTokenizerSerializer(item as ClassicTokenizer); + + case "#Microsoft.Azure.Search.EdgeNGramTokenizer": + return edgeNGramTokenizerSerializer(item as EdgeNGramTokenizer); + + case "#Microsoft.Azure.Search.KeywordTokenizerV2": + return keywordTokenizerSerializer(item as KeywordTokenizer); + + case "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer": + return microsoftLanguageTokenizerSerializer(item as MicrosoftLanguageTokenizer); + + case "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer": + return microsoftLanguageStemmingTokenizerSerializer( + item as MicrosoftLanguageStemmingTokenizer, + ); + + case "#Microsoft.Azure.Search.NGramTokenizer": + return nGramTokenizerSerializer(item as NGramTokenizer); + + case "#Microsoft.Azure.Search.PathHierarchyTokenizerV2": + return pathHierarchyTokenizerSerializer(item as PathHierarchyTokenizer); + + case "#Microsoft.Azure.Search.PatternTokenizer": + return patternTokenizerSerializer(item as PatternTokenizer); + + case "#Microsoft.Azure.Search.StandardTokenizerV2": + return luceneStandardTokenizerSerializer(item as LuceneStandardTokenizer); + + case "#Microsoft.Azure.Search.UaxUrlEmailTokenizer": + return uaxUrlEmailTokenizerSerializer(item as UaxUrlEmailTokenizer); + + default: + return lexicalTokenizerSerializer(item); + } +} + +export function lexicalTokenizerUnionDeserializer(item: any): LexicalTokenizerUnion { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.ClassicTokenizer": + return classicTokenizerDeserializer(item as ClassicTokenizer); + + case "#Microsoft.Azure.Search.EdgeNGramTokenizer": + return edgeNGramTokenizerDeserializer(item as EdgeNGramTokenizer); + + case "#Microsoft.Azure.Search.KeywordTokenizerV2": + return keywordTokenizerDeserializer(item as KeywordTokenizer); + + case "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer": + return microsoftLanguageTokenizerDeserializer(item as MicrosoftLanguageTokenizer); + + case 
"#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer": + return microsoftLanguageStemmingTokenizerDeserializer( + item as MicrosoftLanguageStemmingTokenizer, + ); + + case "#Microsoft.Azure.Search.NGramTokenizer": + return nGramTokenizerDeserializer(item as NGramTokenizer); + + case "#Microsoft.Azure.Search.PathHierarchyTokenizerV2": + return pathHierarchyTokenizerDeserializer(item as PathHierarchyTokenizer); + + case "#Microsoft.Azure.Search.PatternTokenizer": + return patternTokenizerDeserializer(item as PatternTokenizer); + + case "#Microsoft.Azure.Search.StandardTokenizerV2": + return luceneStandardTokenizerDeserializer(item as LuceneStandardTokenizer); + + case "#Microsoft.Azure.Search.UaxUrlEmailTokenizer": + return uaxUrlEmailTokenizerDeserializer(item as UaxUrlEmailTokenizer); + + default: + return lexicalTokenizerDeserializer(item); + } +} + +/** Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. */ +export interface ClassicTokenizer extends LexicalTokenizer { + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; + /** A URI fragment specifying the type of tokenizer. */ + odatatype: "#Microsoft.Azure.Search.ClassicTokenizer"; +} + +export function classicTokenizerSerializer(item: ClassicTokenizer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + maxTokenLength: item["maxTokenLength"], + }; +} + +export function classicTokenizerDeserializer(item: any): ClassicTokenizer { + return { + odatatype: item["@odata.type"], + name: item["name"], + maxTokenLength: item["maxTokenLength"], + }; +} + +/** Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */ +export interface EdgeNGramTokenizer extends LexicalTokenizer { + /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */ + minGram?: number; + /** The maximum n-gram length. Default is 2. Maximum is 300. */ + maxGram?: number; + /** Character classes to keep in the tokens. */ + tokenChars?: TokenCharacterKind[]; + /** A URI fragment specifying the type of tokenizer. */ + odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer"; +} + +export function edgeNGramTokenizerSerializer(item: EdgeNGramTokenizer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + minGram: item["minGram"], + maxGram: item["maxGram"], + tokenChars: !item["tokenChars"] + ? item["tokenChars"] + : item["tokenChars"].map((p: any) => { + return p; + }), + }; +} + +export function edgeNGramTokenizerDeserializer(item: any): EdgeNGramTokenizer { + return { + odatatype: item["@odata.type"], + name: item["name"], + minGram: item["minGram"], + maxGram: item["maxGram"], + tokenChars: !item["tokenChars"] + ? item["tokenChars"] + : item["tokenChars"].map((p: any) => { + return p; + }), + }; +} + +/** Represents classes of characters on which a token filter can operate. */ +export enum KnownTokenCharacterKind { + /** Keeps letters in tokens. */ + Letter = "letter", + /** Keeps digits in tokens. */ + Digit = "digit", + /** Keeps whitespace in tokens. */ + Whitespace = "whitespace", + /** Keeps punctuation in tokens. */ + Punctuation = "punctuation", + /** Keeps symbols in tokens. */ + Symbol = "symbol", +} + +/** + * Represents classes of characters on which a token filter can operate. 
+
+/** Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. */
+export interface ClassicTokenizer extends LexicalTokenizer {
+  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
+  maxTokenLength?: number;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.ClassicTokenizer";
+}
+
+export function classicTokenizerSerializer(item: ClassicTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+export function classicTokenizerDeserializer(item: any): ClassicTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+/** Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */
+export interface EdgeNGramTokenizer extends LexicalTokenizer {
+  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+  /** Character classes to keep in the tokens. */
+  tokenChars?: TokenCharacterKind[];
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer";
+}
+
+export function edgeNGramTokenizerSerializer(item: EdgeNGramTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    tokenChars: !item["tokenChars"]
+      ? item["tokenChars"]
+      : item["tokenChars"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function edgeNGramTokenizerDeserializer(item: any): EdgeNGramTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    tokenChars: !item["tokenChars"]
+      ? item["tokenChars"]
+      : item["tokenChars"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+/** Represents classes of characters on which a token filter can operate. */
+export enum KnownTokenCharacterKind {
+  /** Keeps letters in tokens. */
+  Letter = "letter",
+  /** Keeps digits in tokens. */
+  Digit = "digit",
+  /** Keeps whitespace in tokens. */
+  Whitespace = "whitespace",
+  /** Keeps punctuation in tokens. */
+  Punctuation = "punctuation",
+  /** Keeps symbols in tokens. */
+  Symbol = "symbol",
+}
+
+/**
+ * Represents classes of characters on which a token filter can operate. \
+ * {@link KnownTokenCharacterKind} can be used interchangeably with TokenCharacterKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **letter**: Keeps letters in tokens. \
+ * **digit**: Keeps digits in tokens. \
+ * **whitespace**: Keeps whitespace in tokens. \
+ * **punctuation**: Keeps punctuation in tokens. \
+ * **symbol**: Keeps symbols in tokens.
+ */
+export type TokenCharacterKind = string;
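// --- Editor's sketch (illustrative, not part of the generated change) ---
// The Known-enum/string-alias pair above is the extensible-enum pattern this
// file uses throughout: known values get named constants, but any string the
// service adds later remains assignable without a client update.
const knownKind: TokenCharacterKind = KnownTokenCharacterKind.Letter; // "letter"
const futureKind: TokenCharacterKind = "some-future-kind"; // still type-checks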
+
+/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */
+export interface KeywordTokenizer extends LexicalTokenizer {
+  /** The maximum token length. Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
+  maxTokenLength?: number;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.KeywordTokenizerV2";
+}
+
+export function keywordTokenizerSerializer(item: KeywordTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+export function keywordTokenizerDeserializer(item: any): KeywordTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+/** Divides text using language-specific rules. */
+export interface MicrosoftLanguageTokenizer extends LexicalTokenizer {
+  /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
+  maxTokenLength?: number;
+  /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */
+  isSearchTokenizer?: boolean;
+  /** The language to use. The default is English. */
+  language?: MicrosoftTokenizerLanguage;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer";
+}
+
+export function microsoftLanguageTokenizerSerializer(item: MicrosoftLanguageTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    isSearchTokenizer: item["isSearchTokenizer"],
+    language: item["language"],
+  };
+}
+
+export function microsoftLanguageTokenizerDeserializer(item: any): MicrosoftLanguageTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    isSearchTokenizer: item["isSearchTokenizer"],
+    language: item["language"],
+  };
+}
+
+/** Lists the languages supported by the Microsoft language tokenizer. */
+export enum KnownMicrosoftTokenizerLanguage {
+  /** Selects the Microsoft tokenizer for Bangla. */
+  Bangla = "bangla",
+  /** Selects the Microsoft tokenizer for Bulgarian. */
+  Bulgarian = "bulgarian",
+  /** Selects the Microsoft tokenizer for Catalan. */
+  Catalan = "catalan",
+  /** Selects the Microsoft tokenizer for Chinese (Simplified). */
+  ChineseSimplified = "chineseSimplified",
+  /** Selects the Microsoft tokenizer for Chinese (Traditional). */
+  ChineseTraditional = "chineseTraditional",
+  /** Selects the Microsoft tokenizer for Croatian. */
+  Croatian = "croatian",
+  /** Selects the Microsoft tokenizer for Czech. */
+  Czech = "czech",
+  /** Selects the Microsoft tokenizer for Danish. */
+  Danish = "danish",
+  /** Selects the Microsoft tokenizer for Dutch. */
+  Dutch = "dutch",
+  /** Selects the Microsoft tokenizer for English. */
+  English = "english",
+  /** Selects the Microsoft tokenizer for French. */
+  French = "french",
+  /** Selects the Microsoft tokenizer for German. */
+  German = "german",
+  /** Selects the Microsoft tokenizer for Greek. */
+  Greek = "greek",
+  /** Selects the Microsoft tokenizer for Gujarati. */
+  Gujarati = "gujarati",
+  /** Selects the Microsoft tokenizer for Hindi. */
+  Hindi = "hindi",
+  /** Selects the Microsoft tokenizer for Icelandic. */
+  Icelandic = "icelandic",
+  /** Selects the Microsoft tokenizer for Indonesian. */
+  Indonesian = "indonesian",
+  /** Selects the Microsoft tokenizer for Italian. */
+  Italian = "italian",
+  /** Selects the Microsoft tokenizer for Japanese. */
+  Japanese = "japanese",
+  /** Selects the Microsoft tokenizer for Kannada. */
+  Kannada = "kannada",
+  /** Selects the Microsoft tokenizer for Korean. */
+  Korean = "korean",
+  /** Selects the Microsoft tokenizer for Malay. */
+  Malay = "malay",
+  /** Selects the Microsoft tokenizer for Malayalam. */
+  Malayalam = "malayalam",
+  /** Selects the Microsoft tokenizer for Marathi. */
+  Marathi = "marathi",
+  /** Selects the Microsoft tokenizer for Norwegian (Bokmål). */
+  NorwegianBokmaal = "norwegianBokmaal",
+  /** Selects the Microsoft tokenizer for Polish. */
+  Polish = "polish",
+  /** Selects the Microsoft tokenizer for Portuguese. */
+  Portuguese = "portuguese",
+  /** Selects the Microsoft tokenizer for Portuguese (Brazil). */
+  PortugueseBrazilian = "portugueseBrazilian",
+  /** Selects the Microsoft tokenizer for Punjabi. */
+  Punjabi = "punjabi",
+  /** Selects the Microsoft tokenizer for Romanian. */
+  Romanian = "romanian",
+  /** Selects the Microsoft tokenizer for Russian. */
+  Russian = "russian",
+  /** Selects the Microsoft tokenizer for Serbian (Cyrillic). */
+  SerbianCyrillic = "serbianCyrillic",
+  /** Selects the Microsoft tokenizer for Serbian (Latin). */
+  SerbianLatin = "serbianLatin",
+  /** Selects the Microsoft tokenizer for Slovenian. */
+  Slovenian = "slovenian",
+  /** Selects the Microsoft tokenizer for Spanish. */
+  Spanish = "spanish",
+  /** Selects the Microsoft tokenizer for Swedish. */
+  Swedish = "swedish",
+  /** Selects the Microsoft tokenizer for Tamil. */
+  Tamil = "tamil",
+  /** Selects the Microsoft tokenizer for Telugu. */
+  Telugu = "telugu",
+  /** Selects the Microsoft tokenizer for Thai. */
+  Thai = "thai",
+  /** Selects the Microsoft tokenizer for Ukrainian. */
+  Ukrainian = "ukrainian",
+  /** Selects the Microsoft tokenizer for Urdu. */
+  Urdu = "urdu",
+  /** Selects the Microsoft tokenizer for Vietnamese. */
+  Vietnamese = "vietnamese",
+}
+
+/**
+ * Lists the languages supported by the Microsoft language tokenizer. \
+ * {@link KnownMicrosoftTokenizerLanguage} can be used interchangeably with MicrosoftTokenizerLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **bangla**: Selects the Microsoft tokenizer for Bangla. \
+ * **bulgarian**: Selects the Microsoft tokenizer for Bulgarian. \
+ * **catalan**: Selects the Microsoft tokenizer for Catalan. \
+ * **chineseSimplified**: Selects the Microsoft tokenizer for Chinese (Simplified). \
+ * **chineseTraditional**: Selects the Microsoft tokenizer for Chinese (Traditional). \
+ * **croatian**: Selects the Microsoft tokenizer for Croatian. \
+ * **czech**: Selects the Microsoft tokenizer for Czech. \
+ * **danish**: Selects the Microsoft tokenizer for Danish. \
+ * **dutch**: Selects the Microsoft tokenizer for Dutch. \
+ * **english**: Selects the Microsoft tokenizer for English. \
+ * **french**: Selects the Microsoft tokenizer for French. \
+ * **german**: Selects the Microsoft tokenizer for German. \
+ * **greek**: Selects the Microsoft tokenizer for Greek. \
+ * **gujarati**: Selects the Microsoft tokenizer for Gujarati. \
+ * **hindi**: Selects the Microsoft tokenizer for Hindi. \
+ * **icelandic**: Selects the Microsoft tokenizer for Icelandic. \
+ * **indonesian**: Selects the Microsoft tokenizer for Indonesian. \
+ * **italian**: Selects the Microsoft tokenizer for Italian. \
+ * **japanese**: Selects the Microsoft tokenizer for Japanese. \
+ * **kannada**: Selects the Microsoft tokenizer for Kannada. \
+ * **korean**: Selects the Microsoft tokenizer for Korean. \
+ * **malay**: Selects the Microsoft tokenizer for Malay. \
+ * **malayalam**: Selects the Microsoft tokenizer for Malayalam. \
+ * **marathi**: Selects the Microsoft tokenizer for Marathi. \
+ * **norwegianBokmaal**: Selects the Microsoft tokenizer for Norwegian (Bokmål). \
+ * **polish**: Selects the Microsoft tokenizer for Polish. \
+ * **portuguese**: Selects the Microsoft tokenizer for Portuguese. \
+ * **portugueseBrazilian**: Selects the Microsoft tokenizer for Portuguese (Brazil). \
+ * **punjabi**: Selects the Microsoft tokenizer for Punjabi. \
+ * **romanian**: Selects the Microsoft tokenizer for Romanian. \
+ * **russian**: Selects the Microsoft tokenizer for Russian. \
+ * **serbianCyrillic**: Selects the Microsoft tokenizer for Serbian (Cyrillic). \
+ * **serbianLatin**: Selects the Microsoft tokenizer for Serbian (Latin). \
+ * **slovenian**: Selects the Microsoft tokenizer for Slovenian. \
+ * **spanish**: Selects the Microsoft tokenizer for Spanish. \
+ * **swedish**: Selects the Microsoft tokenizer for Swedish. \
+ * **tamil**: Selects the Microsoft tokenizer for Tamil. \
+ * **telugu**: Selects the Microsoft tokenizer for Telugu. \
+ * **thai**: Selects the Microsoft tokenizer for Thai. \
+ * **ukrainian**: Selects the Microsoft tokenizer for Ukrainian. \
+ * **urdu**: Selects the Microsoft tokenizer for Urdu. \
+ * **vietnamese**: Selects the Microsoft tokenizer for Vietnamese.
+ */
+export type MicrosoftTokenizerLanguage = string;
+
+/** Divides text using language-specific rules and reduces words to their base forms. */
+export interface MicrosoftLanguageStemmingTokenizer extends LexicalTokenizer {
+  /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
+  maxTokenLength?: number;
+  /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */
+  isSearchTokenizer?: boolean;
+  /** The language to use. The default is English. */
+  language?: MicrosoftStemmingTokenizerLanguage;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer";
+}
+
+export function microsoftLanguageStemmingTokenizerSerializer(
+  item: MicrosoftLanguageStemmingTokenizer,
+): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    isSearchTokenizer: item["isSearchTokenizer"],
+    language: item["language"],
+  };
+}
+
+export function microsoftLanguageStemmingTokenizerDeserializer(
+  item: any,
+): MicrosoftLanguageStemmingTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    isSearchTokenizer: item["isSearchTokenizer"],
+    language: item["language"],
+  };
+}
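// --- Editor's sketch (illustrative, not part of the generated change) ---
// A hypothetical stemming tokenizer in the client shape; per the doc comments
// above, `isSearchTokenizer: false` (the default) means it is used at
// indexing time rather than query time.
const stemmingExample: MicrosoftLanguageStemmingTokenizer = {
  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer",
  name: "my-english-stemming-tokenizer",
  language: "english",
  isSearchTokenizer: false,
};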
+
+/** Lists the languages supported by the Microsoft language stemming tokenizer. */
+export enum KnownMicrosoftStemmingTokenizerLanguage {
+  /** Selects the Microsoft stemming tokenizer for Arabic. */
+  Arabic = "arabic",
+  /** Selects the Microsoft stemming tokenizer for Bangla. */
+  Bangla = "bangla",
+  /** Selects the Microsoft stemming tokenizer for Bulgarian. */
+  Bulgarian = "bulgarian",
+  /** Selects the Microsoft stemming tokenizer for Catalan. */
+  Catalan = "catalan",
+  /** Selects the Microsoft stemming tokenizer for Croatian. */
+  Croatian = "croatian",
+  /** Selects the Microsoft stemming tokenizer for Czech. */
+  Czech = "czech",
+  /** Selects the Microsoft stemming tokenizer for Danish. */
+  Danish = "danish",
+  /** Selects the Microsoft stemming tokenizer for Dutch. */
+  Dutch = "dutch",
+  /** Selects the Microsoft stemming tokenizer for English. */
+  English = "english",
+  /** Selects the Microsoft stemming tokenizer for Estonian. */
+  Estonian = "estonian",
+  /** Selects the Microsoft stemming tokenizer for Finnish. */
+  Finnish = "finnish",
+  /** Selects the Microsoft stemming tokenizer for French. */
+  French = "french",
+  /** Selects the Microsoft stemming tokenizer for German. */
+  German = "german",
+  /** Selects the Microsoft stemming tokenizer for Greek. */
+  Greek = "greek",
+  /** Selects the Microsoft stemming tokenizer for Gujarati. */
+  Gujarati = "gujarati",
+  /** Selects the Microsoft stemming tokenizer for Hebrew. */
+  Hebrew = "hebrew",
+  /** Selects the Microsoft stemming tokenizer for Hindi. */
+  Hindi = "hindi",
+  /** Selects the Microsoft stemming tokenizer for Hungarian. */
+  Hungarian = "hungarian",
+  /** Selects the Microsoft stemming tokenizer for Icelandic. */
+  Icelandic = "icelandic",
+  /** Selects the Microsoft stemming tokenizer for Indonesian. */
+  Indonesian = "indonesian",
+  /** Selects the Microsoft stemming tokenizer for Italian. */
+  Italian = "italian",
+  /** Selects the Microsoft stemming tokenizer for Kannada. */
+  Kannada = "kannada",
+  /** Selects the Microsoft stemming tokenizer for Latvian. */
+  Latvian = "latvian",
+  /** Selects the Microsoft stemming tokenizer for Lithuanian. */
+  Lithuanian = "lithuanian",
+  /** Selects the Microsoft stemming tokenizer for Malay. */
+  Malay = "malay",
+  /** Selects the Microsoft stemming tokenizer for Malayalam. */
+  Malayalam = "malayalam",
+  /** Selects the Microsoft stemming tokenizer for Marathi. */
+  Marathi = "marathi",
+  /** Selects the Microsoft stemming tokenizer for Norwegian (Bokmål). */
+  NorwegianBokmaal = "norwegianBokmaal",
+  /** Selects the Microsoft stemming tokenizer for Polish. */
+  Polish = "polish",
+  /** Selects the Microsoft stemming tokenizer for Portuguese. */
+  Portuguese = "portuguese",
+  /** Selects the Microsoft stemming tokenizer for Portuguese (Brazil). */
+  PortugueseBrazilian = "portugueseBrazilian",
+  /** Selects the Microsoft stemming tokenizer for Punjabi. */
+  Punjabi = "punjabi",
+  /** Selects the Microsoft stemming tokenizer for Romanian. */
+  Romanian = "romanian",
+  /** Selects the Microsoft stemming tokenizer for Russian. */
+  Russian = "russian",
+  /** Selects the Microsoft stemming tokenizer for Serbian (Cyrillic). */
+  SerbianCyrillic = "serbianCyrillic",
+  /** Selects the Microsoft stemming tokenizer for Serbian (Latin). */
+  SerbianLatin = "serbianLatin",
+  /** Selects the Microsoft stemming tokenizer for Slovak. */
+  Slovak = "slovak",
+  /** Selects the Microsoft stemming tokenizer for Slovenian. */
+  Slovenian = "slovenian",
+  /** Selects the Microsoft stemming tokenizer for Spanish. */
+  Spanish = "spanish",
+  /** Selects the Microsoft stemming tokenizer for Swedish. */
+  Swedish = "swedish",
+  /** Selects the Microsoft stemming tokenizer for Tamil. */
+  Tamil = "tamil",
+  /** Selects the Microsoft stemming tokenizer for Telugu. */
+  Telugu = "telugu",
+  /** Selects the Microsoft stemming tokenizer for Turkish. */
+  Turkish = "turkish",
+  /** Selects the Microsoft stemming tokenizer for Ukrainian. */
+  Ukrainian = "ukrainian",
+  /** Selects the Microsoft stemming tokenizer for Urdu. */
+  Urdu = "urdu",
+}
+
+/**
+ * Lists the languages supported by the Microsoft language stemming tokenizer. \
+ * {@link KnownMicrosoftStemmingTokenizerLanguage} can be used interchangeably with MicrosoftStemmingTokenizerLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **arabic**: Selects the Microsoft stemming tokenizer for Arabic. \
+ * **bangla**: Selects the Microsoft stemming tokenizer for Bangla. \
+ * **bulgarian**: Selects the Microsoft stemming tokenizer for Bulgarian. \
+ * **catalan**: Selects the Microsoft stemming tokenizer for Catalan. \
+ * **croatian**: Selects the Microsoft stemming tokenizer for Croatian. \
+ * **czech**: Selects the Microsoft stemming tokenizer for Czech. \
+ * **danish**: Selects the Microsoft stemming tokenizer for Danish. \
+ * **dutch**: Selects the Microsoft stemming tokenizer for Dutch. \
+ * **english**: Selects the Microsoft stemming tokenizer for English. \
+ * **estonian**: Selects the Microsoft stemming tokenizer for Estonian. \
+ * **finnish**: Selects the Microsoft stemming tokenizer for Finnish. \
+ * **french**: Selects the Microsoft stemming tokenizer for French. \
+ * **german**: Selects the Microsoft stemming tokenizer for German. \
+ * **greek**: Selects the Microsoft stemming tokenizer for Greek. \
+ * **gujarati**: Selects the Microsoft stemming tokenizer for Gujarati. \
+ * **hebrew**: Selects the Microsoft stemming tokenizer for Hebrew. \
+ * **hindi**: Selects the Microsoft stemming tokenizer for Hindi. \
+ * **hungarian**: Selects the Microsoft stemming tokenizer for Hungarian. \
+ * **icelandic**: Selects the Microsoft stemming tokenizer for Icelandic. \
+ * **indonesian**: Selects the Microsoft stemming tokenizer for Indonesian. \
+ * **italian**: Selects the Microsoft stemming tokenizer for Italian. \
+ * **kannada**: Selects the Microsoft stemming tokenizer for Kannada. \
+ * **latvian**: Selects the Microsoft stemming tokenizer for Latvian. \
+ * **lithuanian**: Selects the Microsoft stemming tokenizer for Lithuanian. \
+ * **malay**: Selects the Microsoft stemming tokenizer for Malay. \
+ * **malayalam**: Selects the Microsoft stemming tokenizer for Malayalam. \
+ * **marathi**: Selects the Microsoft stemming tokenizer for Marathi. \
+ * **norwegianBokmaal**: Selects the Microsoft stemming tokenizer for Norwegian (Bokmål). \
+ * **polish**: Selects the Microsoft stemming tokenizer for Polish. \
+ * **portuguese**: Selects the Microsoft stemming tokenizer for Portuguese. \
+ * **portugueseBrazilian**: Selects the Microsoft stemming tokenizer for Portuguese (Brazil). \
+ * **punjabi**: Selects the Microsoft stemming tokenizer for Punjabi. \
+ * **romanian**: Selects the Microsoft stemming tokenizer for Romanian. \
+ * **russian**: Selects the Microsoft stemming tokenizer for Russian. \
+ * **serbianCyrillic**: Selects the Microsoft stemming tokenizer for Serbian (Cyrillic). \
+ * **serbianLatin**: Selects the Microsoft stemming tokenizer for Serbian (Latin). \
+ * **slovak**: Selects the Microsoft stemming tokenizer for Slovak. \
+ * **slovenian**: Selects the Microsoft stemming tokenizer for Slovenian. \
+ * **spanish**: Selects the Microsoft stemming tokenizer for Spanish. \
+ * **swedish**: Selects the Microsoft stemming tokenizer for Swedish. \
+ * **tamil**: Selects the Microsoft stemming tokenizer for Tamil. \
+ * **telugu**: Selects the Microsoft stemming tokenizer for Telugu. \
+ * **turkish**: Selects the Microsoft stemming tokenizer for Turkish. \
+ * **ukrainian**: Selects the Microsoft stemming tokenizer for Ukrainian. \
+ * **urdu**: Selects the Microsoft stemming tokenizer for Urdu.
+ */
+export type MicrosoftStemmingTokenizerLanguage = string;
+
+/** Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */
+export interface NGramTokenizer extends LexicalTokenizer {
+  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+  /** Character classes to keep in the tokens. */
+  tokenChars?: TokenCharacterKind[];
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.NGramTokenizer";
+}
+
+export function nGramTokenizerSerializer(item: NGramTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    tokenChars: !item["tokenChars"]
+      ? item["tokenChars"]
+      : item["tokenChars"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function nGramTokenizerDeserializer(item: any): NGramTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    tokenChars: !item["tokenChars"]
+      ? item["tokenChars"]
+      : item["tokenChars"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+/** Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. */
+export interface PathHierarchyTokenizer extends LexicalTokenizer {
+  /** The delimiter character to use. Default is "/". */
+  delimiter?: string;
+  /** A value that, if set, replaces the delimiter character. Default is "/". */
+  replacement?: string;
+  /** The maximum token length. Default and maximum is 300. */
+  maxTokenLength?: number;
+  /** A value indicating whether to generate tokens in reverse order. Default is false. */
+  reverseTokenOrder?: boolean;
+  /** The number of initial tokens to skip. Default is 0. */
+  numberOfTokensToSkip?: number;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2";
+}
+
+export function pathHierarchyTokenizerSerializer(item: PathHierarchyTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    delimiter: item["delimiter"],
+    replacement: item["replacement"],
+    maxTokenLength: item["maxTokenLength"],
+    reverse: item["reverseTokenOrder"],
+    skip: item["numberOfTokensToSkip"],
+  };
+}
+
+export function pathHierarchyTokenizerDeserializer(item: any): PathHierarchyTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    delimiter: item["delimiter"],
+    replacement: item["replacement"],
+    maxTokenLength: item["maxTokenLength"],
+    reverseTokenOrder: item["reverse"],
+    numberOfTokensToSkip: item["skip"],
+  };
+}
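// --- Editor's sketch (illustrative, not part of the generated change) ---
// The path-hierarchy helpers above rename properties between the client and
// wire shapes: `reverseTokenOrder` <-> `reverse` and `numberOfTokensToSkip`
// <-> `skip`. A hypothetical tokenizer makes the mapping concrete.
const pathWire = pathHierarchyTokenizerSerializer({
  odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2",
  name: "my-path-tokenizer",
  delimiter: "/",
  reverseTokenOrder: true,
  numberOfTokensToSkip: 1,
});
// pathWire.reverse === true, pathWire.skip === 1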
+
+/** Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. */
+export interface PatternTokenizer extends LexicalTokenizer {
+  /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */
+  pattern?: string;
+  /** Regular expression flags. */
+  flags?: RegexFlags;
+  /** The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. */
+  group?: number;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.PatternTokenizer";
+}
+
+export function patternTokenizerSerializer(item: PatternTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    pattern: item["pattern"],
+    flags: item["flags"],
+    group: item["group"],
+  };
+}
+
+export function patternTokenizerDeserializer(item: any): PatternTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    pattern: item["pattern"],
+    flags: item["flags"],
+    group: item["group"],
+  };
+}
+
+/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */
+export interface LuceneStandardTokenizer extends LexicalTokenizer {
+  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
+  maxTokenLength?: number;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.StandardTokenizerV2";
+}
+
+export function luceneStandardTokenizerSerializer(item: LuceneStandardTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+export function luceneStandardTokenizerDeserializer(item: any): LuceneStandardTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+/** Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. */
+export interface UaxUrlEmailTokenizer extends LexicalTokenizer {
+  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
+  maxTokenLength?: number;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer";
+}
+
+export function uaxUrlEmailTokenizerSerializer(item: UaxUrlEmailTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+export function uaxUrlEmailTokenizerDeserializer(item: any): UaxUrlEmailTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+export function tokenFilterUnionArraySerializer(result: Array<TokenFilterUnion>): any[] {
+  return result.map((item) => {
+    return tokenFilterUnionSerializer(item);
+  });
+}
+
+export function tokenFilterUnionArrayDeserializer(result: Array<TokenFilterUnion>): any[] {
+  return result.map((item) => {
+    return tokenFilterUnionDeserializer(item);
+  });
+}
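// --- Editor's sketch (illustrative, not part of the generated change) ---
// The array helpers above just map the per-item union serializer over a
// heterogeneous list; the two filters shown are hypothetical, in client shape.
const filtersWire = tokenFilterUnionArraySerializer([
  {
    odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter",
    name: "fold",
    preserveOriginal: true,
  },
  {
    odatatype: "#Microsoft.Azure.Search.LengthTokenFilter",
    name: "trim-lengths",
    minLength: 2,
    maxLength: 20,
  },
]);
// Each element now carries "@odata.type" plus its wire-renamed properties.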
+
+/** Base type for token filters. */
+export interface TokenFilter {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.AsciiFoldingTokenFilter, #Microsoft.Azure.Search.CjkBigramTokenFilter, #Microsoft.Azure.Search.CommonGramTokenFilter, #Microsoft.Azure.Search.DictionaryDecompounderTokenFilter, #Microsoft.Azure.Search.EdgeNGramTokenFilterV2, #Microsoft.Azure.Search.ElisionTokenFilter, #Microsoft.Azure.Search.KeepTokenFilter, #Microsoft.Azure.Search.KeywordMarkerTokenFilter, #Microsoft.Azure.Search.LengthTokenFilter, #Microsoft.Azure.Search.LimitTokenFilter, #Microsoft.Azure.Search.NGramTokenFilterV2, #Microsoft.Azure.Search.PatternCaptureTokenFilter, #Microsoft.Azure.Search.PatternReplaceTokenFilter, #Microsoft.Azure.Search.PhoneticTokenFilter, #Microsoft.Azure.Search.ShingleTokenFilter, #Microsoft.Azure.Search.SnowballTokenFilter, #Microsoft.Azure.Search.StemmerTokenFilter, #Microsoft.Azure.Search.StemmerOverrideTokenFilter, #Microsoft.Azure.Search.StopwordsTokenFilter, #Microsoft.Azure.Search.SynonymTokenFilter, #Microsoft.Azure.Search.TruncateTokenFilter, #Microsoft.Azure.Search.UniqueTokenFilter, #Microsoft.Azure.Search.WordDelimiterTokenFilter */
+  odatatype: string;
+  /** The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+
+export function tokenFilterSerializer(item: TokenFilter): any {
+  return { "@odata.type": item["odatatype"], name: item["name"] };
+}
+
+export function tokenFilterDeserializer(item: any): TokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+  };
+}
+
+/** Alias for TokenFilterUnion */
+export type TokenFilterUnion =
+  | AsciiFoldingTokenFilter
+  | CjkBigramTokenFilter
+  | CommonGramTokenFilter
+  | DictionaryDecompounderTokenFilter
+  | EdgeNGramTokenFilter
+  | ElisionTokenFilter
+  | KeepTokenFilter
+  | KeywordMarkerTokenFilter
+  | LengthTokenFilter
+  | LimitTokenFilter
+  | NGramTokenFilter
+  | PatternCaptureTokenFilter
+  | PatternReplaceTokenFilter
+  | PhoneticTokenFilter
+  | ShingleTokenFilter
+  | SnowballTokenFilter
+  | StemmerTokenFilter
+  | StemmerOverrideTokenFilter
+  | StopwordsTokenFilter
+  | SynonymTokenFilter
+  | TruncateTokenFilter
+  | UniqueTokenFilter
+  | WordDelimiterTokenFilter
+  | TokenFilter;
+ case "#Microsoft.Azure.Search.SynonymTokenFilter": + return synonymTokenFilterSerializer(item as SynonymTokenFilter); + + case "#Microsoft.Azure.Search.TruncateTokenFilter": + return truncateTokenFilterSerializer(item as TruncateTokenFilter); + + case "#Microsoft.Azure.Search.UniqueTokenFilter": + return uniqueTokenFilterSerializer(item as UniqueTokenFilter); + + case "#Microsoft.Azure.Search.WordDelimiterTokenFilter": + return wordDelimiterTokenFilterSerializer(item as WordDelimiterTokenFilter); + + default: + return tokenFilterSerializer(item); + } +} + +export function tokenFilterUnionDeserializer(item: any): TokenFilterUnion { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.AsciiFoldingTokenFilter": + return asciiFoldingTokenFilterDeserializer(item as AsciiFoldingTokenFilter); + + case "#Microsoft.Azure.Search.CjkBigramTokenFilter": + return cjkBigramTokenFilterDeserializer(item as CjkBigramTokenFilter); + + case "#Microsoft.Azure.Search.CommonGramTokenFilter": + return commonGramTokenFilterDeserializer(item as CommonGramTokenFilter); + + case "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter": + return dictionaryDecompounderTokenFilterDeserializer( + item as DictionaryDecompounderTokenFilter, + ); + + case "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2": + return edgeNGramTokenFilterDeserializer(item as EdgeNGramTokenFilter); + + case "#Microsoft.Azure.Search.ElisionTokenFilter": + return elisionTokenFilterDeserializer(item as ElisionTokenFilter); + + case "#Microsoft.Azure.Search.KeepTokenFilter": + return keepTokenFilterDeserializer(item as KeepTokenFilter); + + case "#Microsoft.Azure.Search.KeywordMarkerTokenFilter": + return keywordMarkerTokenFilterDeserializer(item as KeywordMarkerTokenFilter); + + case "#Microsoft.Azure.Search.LengthTokenFilter": + return lengthTokenFilterDeserializer(item as LengthTokenFilter); + + case "#Microsoft.Azure.Search.LimitTokenFilter": + return limitTokenFilterDeserializer(item as LimitTokenFilter); + + case "#Microsoft.Azure.Search.NGramTokenFilterV2": + return nGramTokenFilterDeserializer(item as NGramTokenFilter); + + case "#Microsoft.Azure.Search.PatternCaptureTokenFilter": + return patternCaptureTokenFilterDeserializer(item as PatternCaptureTokenFilter); + + case "#Microsoft.Azure.Search.PatternReplaceTokenFilter": + return patternReplaceTokenFilterDeserializer(item as PatternReplaceTokenFilter); + + case "#Microsoft.Azure.Search.PhoneticTokenFilter": + return phoneticTokenFilterDeserializer(item as PhoneticTokenFilter); + + case "#Microsoft.Azure.Search.ShingleTokenFilter": + return shingleTokenFilterDeserializer(item as ShingleTokenFilter); + + case "#Microsoft.Azure.Search.SnowballTokenFilter": + return snowballTokenFilterDeserializer(item as SnowballTokenFilter); + + case "#Microsoft.Azure.Search.StemmerTokenFilter": + return stemmerTokenFilterDeserializer(item as StemmerTokenFilter); + + case "#Microsoft.Azure.Search.StemmerOverrideTokenFilter": + return stemmerOverrideTokenFilterDeserializer(item as StemmerOverrideTokenFilter); + + case "#Microsoft.Azure.Search.StopwordsTokenFilter": + return stopwordsTokenFilterDeserializer(item as StopwordsTokenFilter); + + case "#Microsoft.Azure.Search.SynonymTokenFilter": + return synonymTokenFilterDeserializer(item as SynonymTokenFilter); + + case "#Microsoft.Azure.Search.TruncateTokenFilter": + return truncateTokenFilterDeserializer(item as TruncateTokenFilter); + + case "#Microsoft.Azure.Search.UniqueTokenFilter": + return uniqueTokenFilterDeserializer(item as 
+
+export function tokenFilterUnionDeserializer(item: any): TokenFilterUnion {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.AsciiFoldingTokenFilter":
+      return asciiFoldingTokenFilterDeserializer(item as AsciiFoldingTokenFilter);
+
+    case "#Microsoft.Azure.Search.CjkBigramTokenFilter":
+      return cjkBigramTokenFilterDeserializer(item as CjkBigramTokenFilter);
+
+    case "#Microsoft.Azure.Search.CommonGramTokenFilter":
+      return commonGramTokenFilterDeserializer(item as CommonGramTokenFilter);
+
+    case "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter":
+      return dictionaryDecompounderTokenFilterDeserializer(
+        item as DictionaryDecompounderTokenFilter,
+      );
+
+    case "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2":
+      return edgeNGramTokenFilterDeserializer(item as EdgeNGramTokenFilter);
+
+    case "#Microsoft.Azure.Search.ElisionTokenFilter":
+      return elisionTokenFilterDeserializer(item as ElisionTokenFilter);
+
+    case "#Microsoft.Azure.Search.KeepTokenFilter":
+      return keepTokenFilterDeserializer(item as KeepTokenFilter);
+
+    case "#Microsoft.Azure.Search.KeywordMarkerTokenFilter":
+      return keywordMarkerTokenFilterDeserializer(item as KeywordMarkerTokenFilter);
+
+    case "#Microsoft.Azure.Search.LengthTokenFilter":
+      return lengthTokenFilterDeserializer(item as LengthTokenFilter);
+
+    case "#Microsoft.Azure.Search.LimitTokenFilter":
+      return limitTokenFilterDeserializer(item as LimitTokenFilter);
+
+    case "#Microsoft.Azure.Search.NGramTokenFilterV2":
+      return nGramTokenFilterDeserializer(item as NGramTokenFilter);
+
+    case "#Microsoft.Azure.Search.PatternCaptureTokenFilter":
+      return patternCaptureTokenFilterDeserializer(item as PatternCaptureTokenFilter);
+
+    case "#Microsoft.Azure.Search.PatternReplaceTokenFilter":
+      return patternReplaceTokenFilterDeserializer(item as PatternReplaceTokenFilter);
+
+    case "#Microsoft.Azure.Search.PhoneticTokenFilter":
+      return phoneticTokenFilterDeserializer(item as PhoneticTokenFilter);
+
+    case "#Microsoft.Azure.Search.ShingleTokenFilter":
+      return shingleTokenFilterDeserializer(item as ShingleTokenFilter);
+
+    case "#Microsoft.Azure.Search.SnowballTokenFilter":
+      return snowballTokenFilterDeserializer(item as SnowballTokenFilter);
+
+    case "#Microsoft.Azure.Search.StemmerTokenFilter":
+      return stemmerTokenFilterDeserializer(item as StemmerTokenFilter);
+
+    case "#Microsoft.Azure.Search.StemmerOverrideTokenFilter":
+      return stemmerOverrideTokenFilterDeserializer(item as StemmerOverrideTokenFilter);
+
+    case "#Microsoft.Azure.Search.StopwordsTokenFilter":
+      return stopwordsTokenFilterDeserializer(item as StopwordsTokenFilter);
+
+    case "#Microsoft.Azure.Search.SynonymTokenFilter":
+      return synonymTokenFilterDeserializer(item as SynonymTokenFilter);
+
+    case "#Microsoft.Azure.Search.TruncateTokenFilter":
+      return truncateTokenFilterDeserializer(item as TruncateTokenFilter);
+
+    case "#Microsoft.Azure.Search.UniqueTokenFilter":
+      return uniqueTokenFilterDeserializer(item as UniqueTokenFilter);
+
+    case "#Microsoft.Azure.Search.WordDelimiterTokenFilter":
+      return wordDelimiterTokenFilterDeserializer(item as WordDelimiterTokenFilter);
+
+    default:
+      return tokenFilterDeserializer(item);
+  }
+}
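// --- Editor's sketch (illustrative, not part of the generated change) ---
// The `default` branch above is the safety net for discriminator values this
// client predates: the payload still deserializes to the base TokenFilter
// shape instead of throwing. The future filter type below is hypothetical.
const unknownFilter = tokenFilterUnionDeserializer({
  "@odata.type": "#Microsoft.Azure.Search.SomeFutureTokenFilter",
  name: "added-by-a-newer-service",
});
// unknownFilter: { odatatype: "#Microsoft.Azure.Search.SomeFutureTokenFilter",
//                  name: "added-by-a-newer-service" }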
+
+/** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. */
+export interface AsciiFoldingTokenFilter extends TokenFilter {
+  /** A value indicating whether the original token will be kept. Default is false. */
+  preserveOriginal?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter";
+}
+
+export function asciiFoldingTokenFilterSerializer(item: AsciiFoldingTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    preserveOriginal: item["preserveOriginal"],
+  };
+}
+
+export function asciiFoldingTokenFilterDeserializer(item: any): AsciiFoldingTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    preserveOriginal: item["preserveOriginal"],
+  };
+}
+
+/** Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. */
+export interface CjkBigramTokenFilter extends TokenFilter {
+  /** The scripts to ignore. */
+  ignoreScripts?: CjkBigramTokenFilterScripts[];
+  /** A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. */
+  outputUnigrams?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.CjkBigramTokenFilter";
+}
+
+export function cjkBigramTokenFilterSerializer(item: CjkBigramTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    ignoreScripts: !item["ignoreScripts"]
+      ? item["ignoreScripts"]
+      : item["ignoreScripts"].map((p: any) => {
+          return p;
+        }),
+    outputUnigrams: item["outputUnigrams"],
+  };
+}
+
+export function cjkBigramTokenFilterDeserializer(item: any): CjkBigramTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    ignoreScripts: !item["ignoreScripts"]
+      ? item["ignoreScripts"]
+      : item["ignoreScripts"].map((p: any) => {
+          return p;
+        }),
+    outputUnigrams: item["outputUnigrams"],
+  };
+}
+
+/** Scripts that can be ignored by CjkBigramTokenFilter. */
+export enum KnownCjkBigramTokenFilterScripts {
+  /** Ignore Han script when forming bigrams of CJK terms. */
+  Han = "han",
+  /** Ignore Hiragana script when forming bigrams of CJK terms. */
+  Hiragana = "hiragana",
+  /** Ignore Katakana script when forming bigrams of CJK terms. */
+  Katakana = "katakana",
+  /** Ignore Hangul script when forming bigrams of CJK terms. */
+  Hangul = "hangul",
+}
+
+/**
+ * Scripts that can be ignored by CjkBigramTokenFilter. \
+ * {@link KnownCjkBigramTokenFilterScripts} can be used interchangeably with CjkBigramTokenFilterScripts,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **han**: Ignore Han script when forming bigrams of CJK terms. \
+ * **hiragana**: Ignore Hiragana script when forming bigrams of CJK terms. \
+ * **katakana**: Ignore Katakana script when forming bigrams of CJK terms. \
+ * **hangul**: Ignore Hangul script when forming bigrams of CJK terms.
+ */
+export type CjkBigramTokenFilterScripts = string;
+
+/** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. */
+export interface CommonGramTokenFilter extends TokenFilter {
+  /** The set of common words. */
+  commonWords: string[];
+  /** A value indicating whether common words matching will be case insensitive. Default is false. */
+  ignoreCase?: boolean;
+  /** A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false. */
+  useQueryMode?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter";
+}
+
+export function commonGramTokenFilterSerializer(item: CommonGramTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    commonWords: item["commonWords"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+    queryMode: item["useQueryMode"],
+  };
+}
+
+export function commonGramTokenFilterDeserializer(item: any): CommonGramTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    commonWords: item["commonWords"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+    useQueryMode: item["queryMode"],
+  };
+}
+
+/** Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. */
+export interface DictionaryDecompounderTokenFilter extends TokenFilter {
+  /** The list of words to match against. */
+  wordList: string[];
+  /** The minimum word size. Only words longer than this get processed. Default is 5. Maximum is 300. */
+  minWordSize?: number;
+  /** The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum is 300. */
+  minSubwordSize?: number;
+  /** The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. */
+  maxSubwordSize?: number;
+  /** A value indicating whether to add only the longest matching subword to the output. Default is false. */
+  onlyLongestMatch?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter";
+}
+
+export function dictionaryDecompounderTokenFilterSerializer(
+  item: DictionaryDecompounderTokenFilter,
+): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    wordList: item["wordList"].map((p: any) => {
+      return p;
+    }),
+    minWordSize: item["minWordSize"],
+    minSubwordSize: item["minSubwordSize"],
+    maxSubwordSize: item["maxSubwordSize"],
+    onlyLongestMatch: item["onlyLongestMatch"],
+  };
+}
+
+export function dictionaryDecompounderTokenFilterDeserializer(
+  item: any,
+): DictionaryDecompounderTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    wordList: item["wordList"].map((p: any) => {
+      return p;
+    }),
+    minWordSize: item["minWordSize"],
+    minSubwordSize: item["minSubwordSize"],
+    maxSubwordSize: item["maxSubwordSize"],
+    onlyLongestMatch: item["onlyLongestMatch"],
+  };
+}
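// --- Editor's sketch (illustrative, not part of the generated change) ---
// A hypothetical decompounder for German compound words, relying on the
// documented defaults above (minWordSize 5, minSubwordSize 2, maxSubwordSize
// 15) by omitting them; only the dictionary is required.
const decompounder: DictionaryDecompounderTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter",
  name: "german-decompounder",
  wordList: ["Donau", "Dampf", "Schiff", "Fahrt"],
  onlyLongestMatch: false,
};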
+
+/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */
+export interface EdgeNGramTokenFilter extends TokenFilter {
+  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+  /** Specifies which side of the input the n-gram should be generated from. Default is "front". */
+  side?: EdgeNGramTokenFilterSide;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2";
+}
+
+export function edgeNGramTokenFilterSerializer(item: EdgeNGramTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    side: item["side"],
+  };
+}
+
+export function edgeNGramTokenFilterDeserializer(item: any): EdgeNGramTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    side: item["side"],
+  };
+}
+
+/** Specifies which side of the input an n-gram should be generated from. */
+export enum KnownEdgeNGramTokenFilterSide {
+  /** Specifies that the n-gram should be generated from the front of the input. */
+  Front = "front",
+  /** Specifies that the n-gram should be generated from the back of the input. */
+  Back = "back",
+}
+
+/**
+ * Specifies which side of the input an n-gram should be generated from. \
+ * {@link KnownEdgeNGramTokenFilterSide} can be used interchangeably with EdgeNGramTokenFilterSide,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **front**: Specifies that the n-gram should be generated from the front of the input. \
+ * **back**: Specifies that the n-gram should be generated from the back of the input.
+ */
+export type EdgeNGramTokenFilterSide = string;
+
+/** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene. */
+export interface ElisionTokenFilter extends TokenFilter {
+  /** The set of articles to remove. */
+  articles?: string[];
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter";
+}
+
+export function elisionTokenFilterSerializer(item: ElisionTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    articles: !item["articles"]
+      ? item["articles"]
+      : item["articles"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function elisionTokenFilterDeserializer(item: any): ElisionTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    articles: !item["articles"]
+      ? item["articles"]
+      : item["articles"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+/** A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. */
+export interface KeepTokenFilter extends TokenFilter {
+  /** The list of words to keep. */
+  keepWords: string[];
+  /** A value indicating whether to lower case all words first. Default is false. */
+  lowerCaseKeepWords?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.KeepTokenFilter";
+}
+
+export function keepTokenFilterSerializer(item: KeepTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    keepWords: item["keepWords"].map((p: any) => {
+      return p;
+    }),
+    keepWordsCase: item["lowerCaseKeepWords"],
+  };
+}
+
+export function keepTokenFilterDeserializer(item: any): KeepTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    keepWords: item["keepWords"].map((p: any) => {
+      return p;
+    }),
+    lowerCaseKeepWords: item["keepWordsCase"],
+  };
+}
+
+/** Marks terms as keywords. This token filter is implemented using Apache Lucene. */
+export interface KeywordMarkerTokenFilter extends TokenFilter {
+  /** A list of words to mark as keywords. */
+  keywords: string[];
+  /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */
+  ignoreCase?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter";
+}
+
+export function keywordMarkerTokenFilterSerializer(item: KeywordMarkerTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    keywords: item["keywords"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+  };
+}
+
+export function keywordMarkerTokenFilterDeserializer(item: any): KeywordMarkerTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    keywords: item["keywords"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+  };
+}
+
+/** Removes words that are too long or too short. This token filter is implemented using Apache Lucene. */
+export interface LengthTokenFilter extends TokenFilter {
+  /** The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max. */
+  minLength?: number;
+  /** The maximum length in characters. Default and maximum is 300. */
+  maxLength?: number;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.LengthTokenFilter";
+}
+
+export function lengthTokenFilterSerializer(item: LengthTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    min: item["minLength"],
+    max: item["maxLength"],
+  };
+}
+
+export function lengthTokenFilterDeserializer(item: any): LengthTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    minLength: item["min"],
+    maxLength: item["max"],
+  };
+}
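// --- Editor's sketch (illustrative, not part of the generated change) ---
// Two more client/wire renames from the helpers above, shown with
// hypothetical filters: `lowerCaseKeepWords` <-> `keepWordsCase` and
// `minLength`/`maxLength` <-> `min`/`max`.
const keepWire = keepTokenFilterSerializer({
  odatatype: "#Microsoft.Azure.Search.KeepTokenFilter",
  name: "keep-brands",
  keepWords: ["contoso", "fabrikam"],
  lowerCaseKeepWords: true,
});
// keepWire.keepWordsCase === true
const lengthWire = lengthTokenFilterSerializer({
  odatatype: "#Microsoft.Azure.Search.LengthTokenFilter",
  name: "drop-short-tokens",
  minLength: 3,
});
// lengthWire.min === 3, lengthWire.max === undefined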
+
+/** Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. */
+export interface LimitTokenFilter extends TokenFilter {
+  /** The maximum number of tokens to produce. Default is 1. */
+  maxTokenCount?: number;
+  /** A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false. */
+  consumeAllTokens?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.LimitTokenFilter";
+}
+
+export function limitTokenFilterSerializer(item: LimitTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenCount: item["maxTokenCount"],
+    consumeAllTokens: item["consumeAllTokens"],
+  };
+}
+
+export function limitTokenFilterDeserializer(item: any): LimitTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenCount: item["maxTokenCount"],
+    consumeAllTokens: item["consumeAllTokens"],
+  };
+}
+
+/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */
+export interface NGramTokenFilter extends TokenFilter {
+  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.NGramTokenFilterV2";
+}
+
+export function nGramTokenFilterSerializer(item: NGramTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+  };
+}
+
+export function nGramTokenFilterDeserializer(item: any): NGramTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+  };
+}
+
+/** Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. */
+export interface PatternCaptureTokenFilter extends TokenFilter {
+  /** A list of patterns to match against each token. */
+  patterns: string[];
+  /** A value indicating whether to return the original token even if one of the patterns matches. Default is true. */
+  preserveOriginal?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter";
+}
+
+export function patternCaptureTokenFilterSerializer(item: PatternCaptureTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    patterns: item["patterns"].map((p: any) => {
+      return p;
+    }),
+    preserveOriginal: item["preserveOriginal"],
+  };
+}
+
+export function patternCaptureTokenFilterDeserializer(item: any): PatternCaptureTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    patterns: item["patterns"].map((p: any) => {
+      return p;
+    }),
+    preserveOriginal: item["preserveOriginal"],
+  };
+}
+
+/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene. */
+export interface PatternReplaceTokenFilter extends TokenFilter {
+  /** A regular expression pattern. */
+  pattern: string;
+  /** The replacement text. */
+  replacement: string;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter";
+}
+
+export function patternReplaceTokenFilterSerializer(item: PatternReplaceTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    pattern: item["pattern"],
+    replacement: item["replacement"],
+  };
+}
+
+export function patternReplaceTokenFilterDeserializer(item: any): PatternReplaceTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    pattern: item["pattern"],
+    replacement: item["replacement"],
+  };
+}
+
+/** Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. */
+export interface PhoneticTokenFilter extends TokenFilter {
+  /** The phonetic encoder to use. Default is "metaphone". */
+  encoder?: PhoneticEncoder;
+  /** A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. */
+  replaceOriginalTokens?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter";
+}
+
+export function phoneticTokenFilterSerializer(item: PhoneticTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    encoder: item["encoder"],
+    replace: item["replaceOriginalTokens"],
+  };
+}
+
+export function phoneticTokenFilterDeserializer(item: any): PhoneticTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    encoder: item["encoder"],
+    replaceOriginalTokens: item["replace"],
+  };
+}
+
+/** Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. */
+export enum KnownPhoneticEncoder {
+  /** Encodes a token into a Metaphone value. */
+  Metaphone = "metaphone",
+  /** Encodes a token into a double metaphone value. */
+  DoubleMetaphone = "doubleMetaphone",
+  /** Encodes a token into a Soundex value. */
+  Soundex = "soundex",
+  /** Encodes a token into a Refined Soundex value. */
+  RefinedSoundex = "refinedSoundex",
+  /** Encodes a token into a Caverphone 1.0 value. */
+  Caverphone1 = "caverphone1",
+  /** Encodes a token into a Caverphone 2.0 value. */
+  Caverphone2 = "caverphone2",
+  /** Encodes a token into a Cologne Phonetic value. */
+  Cologne = "cologne",
+  /** Encodes a token into a NYSIIS value. */
+  Nysiis = "nysiis",
+  /** Encodes a token using the Kölner Phonetik algorithm. */
+  KoelnerPhonetik = "koelnerPhonetik",
+  /** Encodes a token using the Haase refinement of the Kölner Phonetik algorithm. */
+  HaasePhonetik = "haasePhonetik",
+  /** Encodes a token into a Beider-Morse value. */
+  BeiderMorse = "beiderMorse",
+}
+
+/**
+ * Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. \
+ * {@link KnownPhoneticEncoder} can be used interchangeably with PhoneticEncoder,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **metaphone**: Encodes a token into a Metaphone value. \
+ * **doubleMetaphone**: Encodes a token into a double metaphone value. \
+ * **soundex**: Encodes a token into a Soundex value. \
+ * **refinedSoundex**: Encodes a token into a Refined Soundex value. \
+ * **caverphone1**: Encodes a token into a Caverphone 1.0 value. \
+ * **caverphone2**: Encodes a token into a Caverphone 2.0 value. \
+ * **cologne**: Encodes a token into a Cologne Phonetic value. \
+ * **nysiis**: Encodes a token into a NYSIIS value. \
+ * **koelnerPhonetik**: Encodes a token using the Kölner Phonetik algorithm. \
+ * **haasePhonetik**: Encodes a token using the Haase refinement of the Kölner Phonetik algorithm. \
+ * **beiderMorse**: Encodes a token into a Beider-Morse value.
+ */
+export type PhoneticEncoder = string;
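// --- Editor's sketch (illustrative, not part of the generated change) ---
// A hypothetical phonetic filter: with `replaceOriginalTokens: false` the
// encoded forms are added as synonyms (per the doc comment above), and the
// serializer renames the property to the wire name `replace`.
const phoneticWire = phoneticTokenFilterSerializer({
  odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter",
  name: "sounds-like",
  encoder: "doubleMetaphone",
  replaceOriginalTokens: false,
});
// phoneticWire.replace === false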
*/ + Dutch = "dutch", + /** Selects the Lucene Snowball stemming tokenizer for English. */ + English = "english", + /** Selects the Lucene Snowball stemming tokenizer for Finnish. */ + Finnish = "finnish", + /** Selects the Lucene Snowball stemming tokenizer for French. */ + French = "french", + /** Selects the Lucene Snowball stemming tokenizer for German. */ + German = "german", + /** Selects the Lucene Snowball stemming tokenizer that uses the German variant algorithm. */ + German2 = "german2", + /** Selects the Lucene Snowball stemming tokenizer for Hungarian. */ + Hungarian = "hungarian", + /** Selects the Lucene Snowball stemming tokenizer for Italian. */ + Italian = "italian", + /** Selects the Lucene Snowball stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. */ + Kp = "kp", + /** Selects the Lucene Snowball stemming tokenizer for English that uses the Lovins stemming algorithm. */ + Lovins = "lovins", + /** Selects the Lucene Snowball stemming tokenizer for Norwegian. */ + Norwegian = "norwegian", + /** Selects the Lucene Snowball stemming tokenizer for English that uses the Porter stemming algorithm. */ + Porter = "porter", + /** Selects the Lucene Snowball stemming tokenizer for Portuguese. */ + Portuguese = "portuguese", + /** Selects the Lucene Snowball stemming tokenizer for Romanian. */ + Romanian = "romanian", + /** Selects the Lucene Snowball stemming tokenizer for Russian. */ + Russian = "russian", + /** Selects the Lucene Snowball stemming tokenizer for Spanish. */ + Spanish = "spanish", + /** Selects the Lucene Snowball stemming tokenizer for Swedish. */ + Swedish = "swedish", + /** Selects the Lucene Snowball stemming tokenizer for Turkish. */ + Turkish = "turkish", +} + +/** + * The language to use for a Snowball token filter. \ + * {@link KnownSnowballTokenFilterLanguage} can be used interchangeably with SnowballTokenFilterLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **armenian**: Selects the Lucene Snowball stemming tokenizer for Armenian. \ + * **basque**: Selects the Lucene Snowball stemming tokenizer for Basque. \ + * **catalan**: Selects the Lucene Snowball stemming tokenizer for Catalan. \ + * **danish**: Selects the Lucene Snowball stemming tokenizer for Danish. \ + * **dutch**: Selects the Lucene Snowball stemming tokenizer for Dutch. \ + * **english**: Selects the Lucene Snowball stemming tokenizer for English. \ + * **finnish**: Selects the Lucene Snowball stemming tokenizer for Finnish. \ + * **french**: Selects the Lucene Snowball stemming tokenizer for French. \ + * **german**: Selects the Lucene Snowball stemming tokenizer for German. \ + * **german2**: Selects the Lucene Snowball stemming tokenizer that uses the German variant algorithm. \ + * **hungarian**: Selects the Lucene Snowball stemming tokenizer for Hungarian. \ + * **italian**: Selects the Lucene Snowball stemming tokenizer for Italian. \ + * **kp**: Selects the Lucene Snowball stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. \ + * **lovins**: Selects the Lucene Snowball stemming tokenizer for English that uses the Lovins stemming algorithm. \ + * **norwegian**: Selects the Lucene Snowball stemming tokenizer for Norwegian. \ + * **porter**: Selects the Lucene Snowball stemming tokenizer for English that uses the Porter stemming algorithm. \ + * **portuguese**: Selects the Lucene Snowball stemming tokenizer for Portuguese. 
\ + * **romanian**: Selects the Lucene Snowball stemming tokenizer for Romanian. \ + * **russian**: Selects the Lucene Snowball stemming tokenizer for Russian. \ + * **spanish**: Selects the Lucene Snowball stemming tokenizer for Spanish. \ + * **swedish**: Selects the Lucene Snowball stemming tokenizer for Swedish. \ + * **turkish**: Selects the Lucene Snowball stemming tokenizer for Turkish. + */ +export type SnowballTokenFilterLanguage = string; + +/** Language specific stemming filter. This token filter is implemented using Apache Lucene. */ +export interface StemmerTokenFilter extends TokenFilter { + /** The language to use. */ + language: StemmerTokenFilterLanguage; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.StemmerTokenFilter"; +} + +export function stemmerTokenFilterSerializer(item: StemmerTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + language: item["language"], + }; +} + +export function stemmerTokenFilterDeserializer(item: any): StemmerTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + language: item["language"], + }; +} + +/** The language to use for a stemmer token filter. */ +export enum KnownStemmerTokenFilterLanguage { + /** Selects the Lucene stemming tokenizer for Arabic. */ + Arabic = "arabic", + /** Selects the Lucene stemming tokenizer for Armenian. */ + Armenian = "armenian", + /** Selects the Lucene stemming tokenizer for Basque. */ + Basque = "basque", + /** Selects the Lucene stemming tokenizer for Portuguese (Brazil). */ + Brazilian = "brazilian", + /** Selects the Lucene stemming tokenizer for Bulgarian. */ + Bulgarian = "bulgarian", + /** Selects the Lucene stemming tokenizer for Catalan. */ + Catalan = "catalan", + /** Selects the Lucene stemming tokenizer for Czech. */ + Czech = "czech", + /** Selects the Lucene stemming tokenizer for Danish. */ + Danish = "danish", + /** Selects the Lucene stemming tokenizer for Dutch. */ + Dutch = "dutch", + /** Selects the Lucene stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. */ + DutchKp = "dutchKp", + /** Selects the Lucene stemming tokenizer for English. */ + English = "english", + /** Selects the Lucene stemming tokenizer for English that does light stemming. */ + LightEnglish = "lightEnglish", + /** Selects the Lucene stemming tokenizer for English that does minimal stemming. */ + MinimalEnglish = "minimalEnglish", + /** Selects the Lucene stemming tokenizer for English that removes trailing possessives from words. */ + PossessiveEnglish = "possessiveEnglish", + /** Selects the Lucene stemming tokenizer for English that uses the Porter2 stemming algorithm. */ + Porter2 = "porter2", + /** Selects the Lucene stemming tokenizer for English that uses the Lovins stemming algorithm. */ + Lovins = "lovins", + /** Selects the Lucene stemming tokenizer for Finnish. */ + Finnish = "finnish", + /** Selects the Lucene stemming tokenizer for Finnish that does light stemming. */ + LightFinnish = "lightFinnish", + /** Selects the Lucene stemming tokenizer for French. */ + French = "french", + /** Selects the Lucene stemming tokenizer for French that does light stemming. */ + LightFrench = "lightFrench", + /** Selects the Lucene stemming tokenizer for French that does minimal stemming. */ + MinimalFrench = "minimalFrench", + /** Selects the Lucene stemming tokenizer for Galician. 
*/ + Galician = "galician", + /** Selects the Lucene stemming tokenizer for Galician that does minimal stemming. */ + MinimalGalician = "minimalGalician", + /** Selects the Lucene stemming tokenizer for German. */ + German = "german", + /** Selects the Lucene stemming tokenizer that uses the German variant algorithm. */ + German2 = "german2", + /** Selects the Lucene stemming tokenizer for German that does light stemming. */ + LightGerman = "lightGerman", + /** Selects the Lucene stemming tokenizer for German that does minimal stemming. */ + MinimalGerman = "minimalGerman", + /** Selects the Lucene stemming tokenizer for Greek. */ + Greek = "greek", + /** Selects the Lucene stemming tokenizer for Hindi. */ + Hindi = "hindi", + /** Selects the Lucene stemming tokenizer for Hungarian. */ + Hungarian = "hungarian", + /** Selects the Lucene stemming tokenizer for Hungarian that does light stemming. */ + LightHungarian = "lightHungarian", + /** Selects the Lucene stemming tokenizer for Indonesian. */ + Indonesian = "indonesian", + /** Selects the Lucene stemming tokenizer for Irish. */ + Irish = "irish", + /** Selects the Lucene stemming tokenizer for Italian. */ + Italian = "italian", + /** Selects the Lucene stemming tokenizer for Italian that does light stemming. */ + LightItalian = "lightItalian", + /** Selects the Lucene stemming tokenizer for Sorani. */ + Sorani = "sorani", + /** Selects the Lucene stemming tokenizer for Latvian. */ + Latvian = "latvian", + /** Selects the Lucene stemming tokenizer for Norwegian (Bokmål). */ + Norwegian = "norwegian", + /** Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does light stemming. */ + LightNorwegian = "lightNorwegian", + /** Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does minimal stemming. */ + MinimalNorwegian = "minimalNorwegian", + /** Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does light stemming. */ + LightNynorsk = "lightNynorsk", + /** Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does minimal stemming. */ + MinimalNynorsk = "minimalNynorsk", + /** Selects the Lucene stemming tokenizer for Portuguese. */ + Portuguese = "portuguese", + /** Selects the Lucene stemming tokenizer for Portuguese that does light stemming. */ + LightPortuguese = "lightPortuguese", + /** Selects the Lucene stemming tokenizer for Portuguese that does minimal stemming. */ + MinimalPortuguese = "minimalPortuguese", + /** Selects the Lucene stemming tokenizer for Portuguese that uses the RSLP stemming algorithm. */ + PortugueseRslp = "portugueseRslp", + /** Selects the Lucene stemming tokenizer for Romanian. */ + Romanian = "romanian", + /** Selects the Lucene stemming tokenizer for Russian. */ + Russian = "russian", + /** Selects the Lucene stemming tokenizer for Russian that does light stemming. */ + LightRussian = "lightRussian", + /** Selects the Lucene stemming tokenizer for Spanish. */ + Spanish = "spanish", + /** Selects the Lucene stemming tokenizer for Spanish that does light stemming. */ + LightSpanish = "lightSpanish", + /** Selects the Lucene stemming tokenizer for Swedish. */ + Swedish = "swedish", + /** Selects the Lucene stemming tokenizer for Swedish that does light stemming. */ + LightSwedish = "lightSwedish", + /** Selects the Lucene stemming tokenizer for Turkish. */ + Turkish = "turkish", +} + +/** + * The language to use for a stemmer token filter. 
\ + * {@link KnownStemmerTokenFilterLanguage} can be used interchangeably with StemmerTokenFilterLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **arabic**: Selects the Lucene stemming tokenizer for Arabic. \ + * **armenian**: Selects the Lucene stemming tokenizer for Armenian. \ + * **basque**: Selects the Lucene stemming tokenizer for Basque. \ + * **brazilian**: Selects the Lucene stemming tokenizer for Portuguese (Brazil). \ + * **bulgarian**: Selects the Lucene stemming tokenizer for Bulgarian. \ + * **catalan**: Selects the Lucene stemming tokenizer for Catalan. \ + * **czech**: Selects the Lucene stemming tokenizer for Czech. \ + * **danish**: Selects the Lucene stemming tokenizer for Danish. \ + * **dutch**: Selects the Lucene stemming tokenizer for Dutch. \ + * **dutchKp**: Selects the Lucene stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. \ + * **english**: Selects the Lucene stemming tokenizer for English. \ + * **lightEnglish**: Selects the Lucene stemming tokenizer for English that does light stemming. \ + * **minimalEnglish**: Selects the Lucene stemming tokenizer for English that does minimal stemming. \ + * **possessiveEnglish**: Selects the Lucene stemming tokenizer for English that removes trailing possessives from words. \ + * **porter2**: Selects the Lucene stemming tokenizer for English that uses the Porter2 stemming algorithm. \ + * **lovins**: Selects the Lucene stemming tokenizer for English that uses the Lovins stemming algorithm. \ + * **finnish**: Selects the Lucene stemming tokenizer for Finnish. \ + * **lightFinnish**: Selects the Lucene stemming tokenizer for Finnish that does light stemming. \ + * **french**: Selects the Lucene stemming tokenizer for French. \ + * **lightFrench**: Selects the Lucene stemming tokenizer for French that does light stemming. \ + * **minimalFrench**: Selects the Lucene stemming tokenizer for French that does minimal stemming. \ + * **galician**: Selects the Lucene stemming tokenizer for Galician. \ + * **minimalGalician**: Selects the Lucene stemming tokenizer for Galician that does minimal stemming. \ + * **german**: Selects the Lucene stemming tokenizer for German. \ + * **german2**: Selects the Lucene stemming tokenizer that uses the German variant algorithm. \ + * **lightGerman**: Selects the Lucene stemming tokenizer for German that does light stemming. \ + * **minimalGerman**: Selects the Lucene stemming tokenizer for German that does minimal stemming. \ + * **greek**: Selects the Lucene stemming tokenizer for Greek. \ + * **hindi**: Selects the Lucene stemming tokenizer for Hindi. \ + * **hungarian**: Selects the Lucene stemming tokenizer for Hungarian. \ + * **lightHungarian**: Selects the Lucene stemming tokenizer for Hungarian that does light stemming. \ + * **indonesian**: Selects the Lucene stemming tokenizer for Indonesian. \ + * **irish**: Selects the Lucene stemming tokenizer for Irish. \ + * **italian**: Selects the Lucene stemming tokenizer for Italian. \ + * **lightItalian**: Selects the Lucene stemming tokenizer for Italian that does light stemming. \ + * **sorani**: Selects the Lucene stemming tokenizer for Sorani. \ + * **latvian**: Selects the Lucene stemming tokenizer for Latvian. \ + * **norwegian**: Selects the Lucene stemming tokenizer for Norwegian (Bokmål). \ + * **lightNorwegian**: Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does light stemming. 
\ + * **minimalNorwegian**: Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does minimal stemming. \ + * **lightNynorsk**: Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does light stemming. \ + * **minimalNynorsk**: Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does minimal stemming. \ + * **portuguese**: Selects the Lucene stemming tokenizer for Portuguese. \ + * **lightPortuguese**: Selects the Lucene stemming tokenizer for Portuguese that does light stemming. \ + * **minimalPortuguese**: Selects the Lucene stemming tokenizer for Portuguese that does minimal stemming. \ + * **portugueseRslp**: Selects the Lucene stemming tokenizer for Portuguese that uses the RSLP stemming algorithm. \ + * **romanian**: Selects the Lucene stemming tokenizer for Romanian. \ + * **russian**: Selects the Lucene stemming tokenizer for Russian. \ + * **lightRussian**: Selects the Lucene stemming tokenizer for Russian that does light stemming. \ + * **spanish**: Selects the Lucene stemming tokenizer for Spanish. \ + * **lightSpanish**: Selects the Lucene stemming tokenizer for Spanish that does light stemming. \ + * **swedish**: Selects the Lucene stemming tokenizer for Swedish. \ + * **lightSwedish**: Selects the Lucene stemming tokenizer for Swedish that does light stemming. \ + * **turkish**: Selects the Lucene stemming tokenizer for Turkish. + */ +export type StemmerTokenFilterLanguage = string; + +/** Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. */ +export interface StemmerOverrideTokenFilter extends TokenFilter { + /** A list of stemming rules in the following format: "word => stem", for example: "ran => run". */ + rules: string[]; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter"; +} + +export function stemmerOverrideTokenFilterSerializer(item: StemmerOverrideTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + rules: item["rules"].map((p: any) => { + return p; + }), + }; +} + +export function stemmerOverrideTokenFilterDeserializer(item: any): StemmerOverrideTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + rules: item["rules"].map((p: any) => { + return p; + }), + }; +} + +/** Removes stop words from a token stream. This token filter is implemented using Apache Lucene. */ +export interface StopwordsTokenFilter extends TokenFilter { + /** The list of stopwords. This property and the stopwords list property cannot both be set. */ + stopwords?: string[]; + /** A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. */ + stopwordsList?: StopwordsList; + /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */ + ignoreCase?: boolean; + /** A value indicating whether to ignore the last search term if it's a stop word. Default is true. */ + removeTrailingStopWords?: boolean; + /** A URI fragment specifying the type of token filter. 
*/ + odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter"; +} + +export function stopwordsTokenFilterSerializer(item: StopwordsTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + stopwordsList: item["stopwordsList"], + ignoreCase: item["ignoreCase"], + removeTrailing: item["removeTrailingStopWords"], + }; +} + +export function stopwordsTokenFilterDeserializer(item: any): StopwordsTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + stopwordsList: item["stopwordsList"], + ignoreCase: item["ignoreCase"], + removeTrailingStopWords: item["removeTrailing"], + }; +} + +/** Identifies a predefined list of language-specific stopwords. */ +export enum KnownStopwordsList { + /** Selects the stopword list for Arabic. */ + Arabic = "arabic", + /** Selects the stopword list for Armenian. */ + Armenian = "armenian", + /** Selects the stopword list for Basque. */ + Basque = "basque", + /** Selects the stopword list for Portuguese (Brazil). */ + Brazilian = "brazilian", + /** Selects the stopword list for Bulgarian. */ + Bulgarian = "bulgarian", + /** Selects the stopword list for Catalan. */ + Catalan = "catalan", + /** Selects the stopword list for Czech. */ + Czech = "czech", + /** Selects the stopword list for Danish. */ + Danish = "danish", + /** Selects the stopword list for Dutch. */ + Dutch = "dutch", + /** Selects the stopword list for English. */ + English = "english", + /** Selects the stopword list for Finnish. */ + Finnish = "finnish", + /** Selects the stopword list for French. */ + French = "french", + /** Selects the stopword list for Galician. */ + Galician = "galician", + /** Selects the stopword list for German. */ + German = "german", + /** Selects the stopword list for Greek. */ + Greek = "greek", + /** Selects the stopword list for Hindi. */ + Hindi = "hindi", + /** Selects the stopword list for Hungarian. */ + Hungarian = "hungarian", + /** Selects the stopword list for Indonesian. */ + Indonesian = "indonesian", + /** Selects the stopword list for Irish. */ + Irish = "irish", + /** Selects the stopword list for Italian. */ + Italian = "italian", + /** Selects the stopword list for Latvian. */ + Latvian = "latvian", + /** Selects the stopword list for Norwegian. */ + Norwegian = "norwegian", + /** Selects the stopword list for Persian. */ + Persian = "persian", + /** Selects the stopword list for Portuguese. */ + Portuguese = "portuguese", + /** Selects the stopword list for Romanian. */ + Romanian = "romanian", + /** Selects the stopword list for Russian. */ + Russian = "russian", + /** Selects the stopword list for Sorani. */ + Sorani = "sorani", + /** Selects the stopword list for Spanish. */ + Spanish = "spanish", + /** Selects the stopword list for Swedish. */ + Swedish = "swedish", + /** Selects the stopword list for Thai. */ + Thai = "thai", + /** Selects the stopword list for Turkish. */ + Turkish = "turkish", +} + +/** + * Identifies a predefined list of language-specific stopwords. \ + * {@link KnownStopwordsList} can be used interchangeably with StopwordsList, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **arabic**: Selects the stopword list for Arabic. 
\
+ * **armenian**: Selects the stopword list for Armenian. \
+ * **basque**: Selects the stopword list for Basque. \
+ * **brazilian**: Selects the stopword list for Portuguese (Brazil). \
+ * **bulgarian**: Selects the stopword list for Bulgarian. \
+ * **catalan**: Selects the stopword list for Catalan. \
+ * **czech**: Selects the stopword list for Czech. \
+ * **danish**: Selects the stopword list for Danish. \
+ * **dutch**: Selects the stopword list for Dutch. \
+ * **english**: Selects the stopword list for English. \
+ * **finnish**: Selects the stopword list for Finnish. \
+ * **french**: Selects the stopword list for French. \
+ * **galician**: Selects the stopword list for Galician. \
+ * **german**: Selects the stopword list for German. \
+ * **greek**: Selects the stopword list for Greek. \
+ * **hindi**: Selects the stopword list for Hindi. \
+ * **hungarian**: Selects the stopword list for Hungarian. \
+ * **indonesian**: Selects the stopword list for Indonesian. \
+ * **irish**: Selects the stopword list for Irish. \
+ * **italian**: Selects the stopword list for Italian. \
+ * **latvian**: Selects the stopword list for Latvian. \
+ * **norwegian**: Selects the stopword list for Norwegian. \
+ * **persian**: Selects the stopword list for Persian. \
+ * **portuguese**: Selects the stopword list for Portuguese. \
+ * **romanian**: Selects the stopword list for Romanian. \
+ * **russian**: Selects the stopword list for Russian. \
+ * **sorani**: Selects the stopword list for Sorani. \
+ * **spanish**: Selects the stopword list for Spanish. \
+ * **swedish**: Selects the stopword list for Swedish. \
+ * **thai**: Selects the stopword list for Thai. \
+ * **turkish**: Selects the stopword list for Turkish.
+ */
+export type StopwordsList = string;
+
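+// Example (illustrative sketch, not a definitive usage; the filter name
+// "demo-stopwords" is an assumption): stopwordsTokenFilterSerializer above
+// renames the client property `removeTrailingStopWords` to the wire property
+// `removeTrailing`, and the deserializer reverses that mapping.
+//
+//   const filter: StopwordsTokenFilter = {
+//     odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter",
+//     name: "demo-stopwords",
+//     stopwordsList: KnownStopwordsList.English,
+//     ignoreCase: true,
+//     removeTrailingStopWords: false,
+//   };
+//   const wire = stopwordsTokenFilterSerializer(filter);
+//   // wire["@odata.type"] === "#Microsoft.Azure.Search.StopwordsTokenFilter"
+//   // wire.removeTrailing === false
+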
+/** Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene. */
+export interface SynonymTokenFilter extends TokenFilter {
+  /** A list of synonyms, specified in one of two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of the => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - a comma-separated list of equivalent words. Set the expand option to change how this list is interpreted. */
+  synonyms: string[];
+  /** A value indicating whether to case-fold input for matching. Default is false. */
+  ignoreCase?: boolean;
+  /** A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. If true, the list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, it is equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. */
+  expand?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter";
+}
+
+export function synonymTokenFilterSerializer(item: SynonymTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    synonyms: item["synonyms"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+    expand: item["expand"],
+  };
+}
+
+export function synonymTokenFilterDeserializer(item: any): SynonymTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    synonyms: item["synonyms"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+    expand: item["expand"],
+  };
+}
+
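+// Example (illustrative sketch; the filter name "demo-synonyms" is an
+// assumption): both synonym formats may be mixed in one filter. With expand
+// left at its default (true), the comma-separated entry maps all listed
+// terms to one another.
+//
+//   const filter: SynonymTokenFilter = {
+//     odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter",
+//     name: "demo-synonyms",
+//     synonyms: [
+//       "incredible, unbelievable, fabulous => amazing",
+//       "couch, sofa, settee",
+//     ],
+//     ignoreCase: true,
+//   };
+//   const wire = synonymTokenFilterSerializer(filter);
+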
+/** Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. */
+export interface TruncateTokenFilter extends TokenFilter {
+  /** The length at which terms will be truncated. Default and maximum is 300. */
+  length?: number;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.TruncateTokenFilter";
+}
+
+export function truncateTokenFilterSerializer(item: TruncateTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    length: item["length"],
+  };
+}
+
+export function truncateTokenFilterDeserializer(item: any): TruncateTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    length: item["length"],
+  };
+}
+
+/** Filters out tokens with same text as the previous token. This token filter is implemented using Apache Lucene. */
+export interface UniqueTokenFilter extends TokenFilter {
+  /** A value indicating whether to remove duplicates only at the same position. Default is false. */
+  onlyOnSamePosition?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.UniqueTokenFilter";
+}
+
+export function uniqueTokenFilterSerializer(item: UniqueTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    onlyOnSamePosition: item["onlyOnSamePosition"],
+  };
+}
+
+export function uniqueTokenFilterDeserializer(item: any): UniqueTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    onlyOnSamePosition: item["onlyOnSamePosition"],
+  };
+}
+
+/** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */
+export interface WordDelimiterTokenFilter extends TokenFilter {
+  /** A value indicating whether to generate part words. If set, causes parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. */
+  generateWordParts?: boolean;
+  /** A value indicating whether to generate number subwords. Default is true. */
+  generateNumberParts?: boolean;
+  /** A value indicating whether maximum runs of word parts will be catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false. */
+  catenateWords?: boolean;
+  /** A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. */
+  catenateNumbers?: boolean;
+  /** A value indicating whether all subword parts will be catenated. For example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. */
+  catenateAll?: boolean;
+  /** A value indicating whether to split words on caseChange. For example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. */
+  splitOnCaseChange?: boolean;
+  /** A value indicating whether original words will be preserved and added to the subword list. Default is false. */
+  preserveOriginal?: boolean;
+  /** A value indicating whether to split on numbers. For example, if this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. */
+  splitOnNumerics?: boolean;
+  /** A value indicating whether to remove trailing "'s" for each subword. Default is true. */
+  stemEnglishPossessive?: boolean;
+  /** A list of tokens to protect from being delimited. */
+  protectedWords?: string[];
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter";
+}
+
+export function wordDelimiterTokenFilterSerializer(item: WordDelimiterTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    generateWordParts: item["generateWordParts"],
+    generateNumberParts: item["generateNumberParts"],
+    catenateWords: item["catenateWords"],
+    catenateNumbers: item["catenateNumbers"],
+    catenateAll: item["catenateAll"],
+    splitOnCaseChange: item["splitOnCaseChange"],
+    preserveOriginal: item["preserveOriginal"],
+    splitOnNumerics: item["splitOnNumerics"],
+    stemEnglishPossessive: item["stemEnglishPossessive"],
+    protectedWords: !item["protectedWords"]
+      ? item["protectedWords"]
+      : item["protectedWords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function wordDelimiterTokenFilterDeserializer(item: any): WordDelimiterTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    generateWordParts: item["generateWordParts"],
+    generateNumberParts: item["generateNumberParts"],
+    catenateWords: item["catenateWords"],
+    catenateNumbers: item["catenateNumbers"],
+    catenateAll: item["catenateAll"],
+    splitOnCaseChange: item["splitOnCaseChange"],
+    preserveOriginal: item["preserveOriginal"],
+    splitOnNumerics: item["splitOnNumerics"],
+    stemEnglishPossessive: item["stemEnglishPossessive"],
+    protectedWords: !item["protectedWords"]
+      ? item["protectedWords"]
+      : item["protectedWords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
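+// Example (illustrative sketch; the filter name "demo-word-delimiter" is an
+// assumption): the serializer and deserializer above are symmetric, so a
+// round trip through the wire shape preserves every optional flag.
+//
+//   const filter: WordDelimiterTokenFilter = {
+//     odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter",
+//     name: "demo-word-delimiter",
+//     catenateWords: true,
+//     splitOnCaseChange: false,
+//     protectedWords: ["TypeScript"],
+//   };
+//   const roundTripped = wordDelimiterTokenFilterDeserializer(
+//     wordDelimiterTokenFilterSerializer(filter),
+//   );
+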
+export function charFilterUnionArraySerializer(result: Array<CharFilterUnion>): any[] {
+  return result.map((item) => {
+    return charFilterUnionSerializer(item);
+  });
+}
+
+export function charFilterUnionArrayDeserializer(result: Array<CharFilterUnion>): any[] {
+  return result.map((item) => {
+    return charFilterUnionDeserializer(item);
+  });
+}
+
+/** Base type for character filters. */
+export interface CharFilter {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.MappingCharFilter, #Microsoft.Azure.Search.PatternReplaceCharFilter */
+  odatatype: string;
+  /** The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+
+export function charFilterSerializer(item: CharFilter): any {
+  return { "@odata.type": item["odatatype"], name: item["name"] };
+}
+
+export function charFilterDeserializer(item: any): CharFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+  };
+}
+
+/** Alias for CharFilterUnion */
+export type CharFilterUnion = MappingCharFilter | PatternReplaceCharFilter | CharFilter;
+
+export function charFilterUnionSerializer(item: CharFilterUnion): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.MappingCharFilter":
+      return mappingCharFilterSerializer(item as MappingCharFilter);
+
+    case "#Microsoft.Azure.Search.PatternReplaceCharFilter":
+      return patternReplaceCharFilterSerializer(item as PatternReplaceCharFilter);
+
+    default:
+      return charFilterSerializer(item);
+  }
+}
+
+export function charFilterUnionDeserializer(item: any): CharFilterUnion {
+  switch (item["@odata.type"]) {
+    case "#Microsoft.Azure.Search.MappingCharFilter":
+      return mappingCharFilterDeserializer(item as MappingCharFilter);
+
+    case "#Microsoft.Azure.Search.PatternReplaceCharFilter":
+      return patternReplaceCharFilterDeserializer(item as PatternReplaceCharFilter);
+
+    default:
+      return charFilterDeserializer(item);
+  }
+}
+
+/** A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. */
+export interface MappingCharFilter extends CharFilter {
+  /** A list of mappings of the following format: "a=>b" (all occurrences of the character "a" will be replaced with character "b"). */
+  mappings: string[];
+  /** A URI fragment specifying the type of char filter. */
+  odatatype: "#Microsoft.Azure.Search.MappingCharFilter";
+}
+
+export function mappingCharFilterSerializer(item: MappingCharFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    mappings: item["mappings"].map((p: any) => {
+      return p;
+    }),
+  };
+}
+
+export function mappingCharFilterDeserializer(item: any): MappingCharFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    mappings: item["mappings"].map((p: any) => {
+      return p;
+    }),
+  };
+}
+
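+// Example (illustrative sketch; the char filter name "demo-mapping" is an
+// assumption): charFilterUnionSerializer dispatches on the discriminator, so
+// a MappingCharFilter is routed to mappingCharFilterSerializer, while unknown
+// discriminators fall back to the base charFilterSerializer.
+//
+//   const mapping: MappingCharFilter = {
+//     odatatype: "#Microsoft.Azure.Search.MappingCharFilter",
+//     name: "demo-mapping",
+//     mappings: ["-=>_", ".=>,"],
+//   };
+//   const wire = charFilterUnionSerializer(mapping);
+//   // wire["@odata.type"] === "#Microsoft.Azure.Search.MappingCharFilter"
+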
+/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene. */
+export interface PatternReplaceCharFilter extends CharFilter {
+  /** A regular expression pattern. */
+  pattern: string;
+  /** The replacement text. */
+  replacement: string;
+  /** A URI fragment specifying the type of char filter. */
+  odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter";
+}
+
+export function patternReplaceCharFilterSerializer(item: PatternReplaceCharFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    pattern: item["pattern"],
+    replacement: item["replacement"],
+  };
+}
+
+export function patternReplaceCharFilterDeserializer(item: any): PatternReplaceCharFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    pattern: item["pattern"],
+    replacement: item["replacement"],
+  };
+}
+
+export function lexicalNormalizerUnionArraySerializer(
+  result: Array<LexicalNormalizerUnion>,
+): any[] {
+  return result.map((item) => {
+    return lexicalNormalizerUnionSerializer(item);
+  });
+}
+
+export function lexicalNormalizerUnionArrayDeserializer(
+  result: Array<LexicalNormalizerUnion>,
+): any[] {
+  return result.map((item) => {
+    return lexicalNormalizerUnionDeserializer(item);
+  });
+}
+
+/** Base type for normalizers. */
+export interface LexicalNormalizer {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.CustomNormalizer */
+  odatatype: string;
+  /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+
+export function lexicalNormalizerSerializer(item: LexicalNormalizer): any {
+  return { "@odata.type": item["odatatype"], name: item["name"] };
+}
+
+export function lexicalNormalizerDeserializer(item: any): LexicalNormalizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+  };
+}
+
+/** Alias for LexicalNormalizerUnion */
+export type LexicalNormalizerUnion = CustomNormalizer | LexicalNormalizer;
+
+export function lexicalNormalizerUnionSerializer(item: LexicalNormalizerUnion): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.CustomNormalizer":
+      return customNormalizerSerializer(item as CustomNormalizer);
+
+    default:
+      return lexicalNormalizerSerializer(item);
+  }
+}
+
+export function lexicalNormalizerUnionDeserializer(item: any): LexicalNormalizerUnion {
+  switch (item["@odata.type"]) {
+    case "#Microsoft.Azure.Search.CustomNormalizer":
+      return customNormalizerDeserializer(item as CustomNormalizer);
+
+    default:
+      return lexicalNormalizerDeserializer(item);
+  }
+}
+
+/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of one or more filters, which modify the token that is stored. */
+export interface CustomNormalizer extends LexicalNormalizer {
+  /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
+  tokenFilters?: TokenFilterName[];
+  /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
+  charFilters?: CharFilterName[];
+  /** A URI fragment specifying the type of normalizer. */
+  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
+}
+
+export function customNormalizerSerializer(item: CustomNormalizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    tokenFilters: !item["tokenFilters"]
+      ? item["tokenFilters"]
+      : item["tokenFilters"].map((p: any) => {
+          return p;
+        }),
+    charFilters: !item["charFilters"]
+      ? item["charFilters"]
+      : item["charFilters"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function customNormalizerDeserializer(item: any): CustomNormalizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    tokenFilters: !item["tokenFilters"]
+      ? item["tokenFilters"]
+      : item["tokenFilters"].map((p: any) => {
+          return p;
+        }),
+    charFilters: !item["charFilters"]
+      ? item["charFilters"]
+      : item["charFilters"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
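+// Example (illustrative sketch; the normalizer name "demo-normalizer" is an
+// assumption, and the token filter names are assumed to be valid
+// TokenFilterName values): a custom normalizer composes existing filter
+// names, and the serializer above passes both lists through unchanged.
+//
+//   const normalizer: CustomNormalizer = {
+//     odatatype: "#Microsoft.Azure.Search.CustomNormalizer",
+//     name: "demo-normalizer",
+//     tokenFilters: ["lowercase", "asciifolding"],
+//   };
+//   const wire = customNormalizerSerializer(normalizer);
+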
+/** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */
+export interface SimilarityAlgorithm {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.ClassicSimilarity, #Microsoft.Azure.Search.BM25Similarity */
+  odatatype: string;
+}
+
+export function similarityAlgorithmSerializer(item: SimilarityAlgorithm): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function similarityAlgorithmDeserializer(item: any): SimilarityAlgorithm {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
+
+/** Alias for SimilarityAlgorithmUnion */
+export type SimilarityAlgorithmUnion = ClassicSimilarity | BM25Similarity | SimilarityAlgorithm;
+
+export function similarityAlgorithmUnionSerializer(item: SimilarityAlgorithmUnion): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.ClassicSimilarity":
+      return classicSimilaritySerializer(item as ClassicSimilarity);
+
+    case "#Microsoft.Azure.Search.BM25Similarity":
+      return bm25SimilaritySerializer(item as BM25Similarity);
+
+    default:
+      return similarityAlgorithmSerializer(item);
+  }
+}
+
+export function similarityAlgorithmUnionDeserializer(item: any): SimilarityAlgorithmUnion {
+  switch (item["@odata.type"]) {
+    case "#Microsoft.Azure.Search.ClassicSimilarity":
+      return classicSimilarityDeserializer(item as ClassicSimilarity);
+
+    case "#Microsoft.Azure.Search.BM25Similarity":
+      return bm25SimilarityDeserializer(item as BM25Similarity);
+
+    default:
+      return similarityAlgorithmDeserializer(item);
+  }
+}
+
+/** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. */
+export interface ClassicSimilarity extends SimilarityAlgorithm {
+  /** The discriminator for derived types. */
+  odatatype: "#Microsoft.Azure.Search.ClassicSimilarity";
+}
+
+export function classicSimilaritySerializer(item: ClassicSimilarity): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function classicSimilarityDeserializer(item: any): ClassicSimilarity {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
+
+/** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). */
+export interface BM25Similarity extends SimilarityAlgorithm {
+  /** This property controls the scaling function between the term frequency of each matching term and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. */
+  k1?: number;
+  /** This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. */
+  b?: number;
+  /** The discriminator for derived types. */
+  odatatype: "#Microsoft.Azure.Search.BM25Similarity";
+}
+
+export function bm25SimilaritySerializer(item: BM25Similarity): any {
+  return { "@odata.type": item["odatatype"], k1: item["k1"], b: item["b"] };
+}
+
+export function bm25SimilarityDeserializer(item: any): BM25Similarity {
+  return {
+    odatatype: item["@odata.type"],
+    k1: item["k1"],
+    b: item["b"],
+  };
+}
+
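+// Example (illustrative sketch): k1 and b pass through to the wire unchanged.
+// Per the docs above, a lower b reduces the length-normalization penalty for
+// long documents, and a higher k1 lets repeated terms keep contributing to
+// the score for longer before saturating.
+//
+//   const similarity: BM25Similarity = {
+//     odatatype: "#Microsoft.Azure.Search.BM25Similarity",
+//     k1: 1.5,
+//     b: 0.5,
+//   };
+//   const wire = bm25SimilaritySerializer(similarity);
+//   // wire = { "@odata.type": "#Microsoft.Azure.Search.BM25Similarity", k1: 1.5, b: 0.5 }
+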
+/** Defines parameters for a search index that influence semantic capabilities. */
+export interface SemanticSearch {
+  /** Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time. */
+  defaultConfigurationName?: string;
+  /** The semantic configurations for the index. */
+  configurations?: SemanticConfiguration[];
+}
+
+export function semanticSearchSerializer(item: SemanticSearch): any {
+  return {
+    defaultConfiguration: item["defaultConfigurationName"],
+    configurations: !item["configurations"]
+      ? item["configurations"]
+      : semanticConfigurationArraySerializer(item["configurations"]),
+  };
+}
+
+export function semanticSearchDeserializer(item: any): SemanticSearch {
+  return {
+    defaultConfigurationName: item["defaultConfiguration"],
+    configurations: !item["configurations"]
+      ? item["configurations"]
+      : semanticConfigurationArrayDeserializer(item["configurations"]),
+  };
+}
+
+export function semanticConfigurationArraySerializer(
+  result: Array<SemanticConfiguration>,
+): any[] {
+  return result.map((item) => {
+    return semanticConfigurationSerializer(item);
+  });
+}
+
+export function semanticConfigurationArrayDeserializer(
+  result: Array<SemanticConfiguration>,
+): any[] {
+  return result.map((item) => {
+    return semanticConfigurationDeserializer(item);
+  });
+}
+
+/** Defines a specific configuration to be used in the context of semantic capabilities. */
+export interface SemanticConfiguration {
+  /** The name of the semantic configuration. */
+  name: string;
+  /** Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) needs to be set. */
+  prioritizedFields: SemanticPrioritizedFields;
+  /** Specifies the score type to be used for the sort order of the search results. */
+  rankingOrder?: RankingOrder;
+  /** Determines which semantic or query rewrite models to use during model flighting/upgrades. */
+  flightingOptIn?: boolean;
+}
+
+export function semanticConfigurationSerializer(item: SemanticConfiguration): any {
+  return {
+    name: item["name"],
+    prioritizedFields: semanticPrioritizedFieldsSerializer(item["prioritizedFields"]),
+    rankingOrder: item["rankingOrder"],
+    flightingOptIn: item["flightingOptIn"],
+  };
+}
+
+export function semanticConfigurationDeserializer(item: any): SemanticConfiguration {
+  return {
+    name: item["name"],
+    prioritizedFields: semanticPrioritizedFieldsDeserializer(item["prioritizedFields"]),
+    rankingOrder: item["rankingOrder"],
+    flightingOptIn: item["flightingOptIn"],
+  };
+}
+
+/** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
+export interface SemanticPrioritizedFields {
+  /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
+  titleField?: SemanticField;
+  /** Defines the content fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain text in natural language form. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
+  contentFields?: SemanticField[];
+  /** Defines the keyword fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain a list of keywords. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
+  keywordsFields?: SemanticField[];
+}
+
+export function semanticPrioritizedFieldsSerializer(item: SemanticPrioritizedFields): any {
+  return {
+    titleField: !item["titleField"]
+      ? item["titleField"]
+      : semanticFieldSerializer(item["titleField"]),
+    prioritizedContentFields: !item["contentFields"]
+      ? item["contentFields"]
+      : semanticFieldArraySerializer(item["contentFields"]),
+    prioritizedKeywordsFields: !item["keywordsFields"]
+      ? item["keywordsFields"]
+      : semanticFieldArraySerializer(item["keywordsFields"]),
+  };
+}
+
+export function semanticPrioritizedFieldsDeserializer(item: any): SemanticPrioritizedFields {
+  return {
+    titleField: !item["titleField"]
+      ? item["titleField"]
+      : semanticFieldDeserializer(item["titleField"]),
+    contentFields: !item["prioritizedContentFields"]
+      ? item["prioritizedContentFields"]
+      : semanticFieldArrayDeserializer(item["prioritizedContentFields"]),
+    keywordsFields: !item["prioritizedKeywordsFields"]
+      ? item["prioritizedKeywordsFields"]
+      : semanticFieldArrayDeserializer(item["prioritizedKeywordsFields"]),
+  };
+}
+
+/** A field that is used as part of the semantic configuration. */
+export interface SemanticField {
+  /** The name of the field. */
+  name: string;
+}
+
+export function semanticFieldSerializer(item: SemanticField): any {
+  return { fieldName: item["name"] };
+}
+
+export function semanticFieldDeserializer(item: any): SemanticField {
+  return {
+    name: item["fieldName"],
+  };
+}
+
+export function semanticFieldArraySerializer(result: Array<SemanticField>): any[] {
+  return result.map((item) => {
+    return semanticFieldSerializer(item);
+  });
+}
+
+export function semanticFieldArrayDeserializer(result: Array<SemanticField>): any[] {
+  return result.map((item) => {
+    return semanticFieldDeserializer(item);
+  });
+}
+
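+// Example (illustrative sketch; the configuration and field names are
+// assumptions): note the client-to-wire renames applied above:
+// defaultConfigurationName => defaultConfiguration, SemanticField.name =>
+// fieldName, and contentFields/keywordsFields =>
+// prioritizedContentFields/prioritizedKeywordsFields.
+//
+//   const semantic: SemanticSearch = {
+//     defaultConfigurationName: "default",
+//     configurations: [
+//       {
+//         name: "default",
+//         prioritizedFields: {
+//           titleField: { name: "title" },
+//           contentFields: [{ name: "description" }],
+//         },
+//       },
+//     ],
+//   };
+//   const wire = semanticSearchSerializer(semantic);
+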
+/** Represents the score to use for the sort order of documents. */
+export enum KnownRankingOrder {
+  /** Sets sort order as BoostedRerankerScore */
+  BoostedRerankerScore = "BoostedRerankerScore",
+  /** Sets sort order as ReRankerScore */
+  RerankerScore = "RerankerScore",
+}
+
+/**
+ * Represents the score to use for the sort order of documents. \
+ * {@link KnownRankingOrder} can be used interchangeably with RankingOrder,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **BoostedRerankerScore**: Sets sort order as BoostedRerankerScore \
+ * **RerankerScore**: Sets sort order as ReRankerScore
+ */
+export type RankingOrder = string;
+
+/** Contains configuration options related to vector search. */
+export interface VectorSearch {
+  /** Defines combinations of configurations to use with vector search. */
+  profiles?: VectorSearchProfile[];
+  /** Contains configuration options specific to the algorithm used during indexing or querying. */
+  algorithms?: VectorSearchAlgorithmConfigurationUnion[];
+  /** Contains configuration options on how to vectorize text vector queries. */
+  vectorizers?: VectorSearchVectorizerUnion[];
+  /** Contains configuration options specific to the compression method used during indexing or querying. */
+  compressions?: VectorSearchCompressionUnion[];
+}
+
+export function vectorSearchSerializer(item: VectorSearch): any {
+  return {
+    profiles: !item["profiles"]
+      ? item["profiles"]
+      : vectorSearchProfileArraySerializer(item["profiles"]),
+    algorithms: !item["algorithms"]
+      ? item["algorithms"]
+      : vectorSearchAlgorithmConfigurationUnionArraySerializer(item["algorithms"]),
+    vectorizers: !item["vectorizers"]
+      ? item["vectorizers"]
+      : vectorSearchVectorizerUnionArraySerializer(item["vectorizers"]),
+    compressions: !item["compressions"]
+      ? item["compressions"]
+      : vectorSearchCompressionUnionArraySerializer(item["compressions"]),
+  };
+}
+
+export function vectorSearchDeserializer(item: any): VectorSearch {
+  return {
+    profiles: !item["profiles"]
+      ? item["profiles"]
+      : vectorSearchProfileArrayDeserializer(item["profiles"]),
+    algorithms: !item["algorithms"]
+      ? item["algorithms"]
+      : vectorSearchAlgorithmConfigurationUnionArrayDeserializer(item["algorithms"]),
+    vectorizers: !item["vectorizers"]
+      ? item["vectorizers"]
+      : vectorSearchVectorizerUnionArrayDeserializer(item["vectorizers"]),
+    compressions: !item["compressions"]
+      ? item["compressions"]
+      : vectorSearchCompressionUnionArrayDeserializer(item["compressions"]),
+  };
+}
+
+export function vectorSearchProfileArraySerializer(result: Array<VectorSearchProfile>): any[] {
+  return result.map((item) => {
+    return vectorSearchProfileSerializer(item);
+  });
+}
+
+export function vectorSearchProfileArrayDeserializer(result: Array<VectorSearchProfile>): any[] {
+  return result.map((item) => {
+    return vectorSearchProfileDeserializer(item);
+  });
+}
+
+/** Defines a combination of configurations to use with vector search. */
+export interface VectorSearchProfile {
+  /** The name to associate with this particular vector search profile. */
+  name: string;
+  /** The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. */
+  algorithmConfigurationName: string;
+  /** The name of the vectorization being configured for use with vector search. */
+  vectorizerName?: string;
+  /** The name of the compression method configuration that specifies the compression method and optional parameters. */
+  compressionName?: string;
+}
+
+export function vectorSearchProfileSerializer(item: VectorSearchProfile): any {
+  return {
+    name: item["name"],
+    algorithm: item["algorithmConfigurationName"],
+    vectorizer: item["vectorizerName"],
+    compression: item["compressionName"],
+  };
+}
+
+export function vectorSearchProfileDeserializer(item: any): VectorSearchProfile {
+  return {
+    name: item["name"],
+    algorithmConfigurationName: item["algorithm"],
+    vectorizerName: item["vectorizer"],
+    compressionName: item["compression"],
+  };
+}
+
+export function vectorSearchAlgorithmConfigurationUnionArraySerializer(
+  result: Array<VectorSearchAlgorithmConfigurationUnion>,
+): any[] {
+  return result.map((item) => {
+    return vectorSearchAlgorithmConfigurationUnionSerializer(item);
+  });
+}
+
+export function vectorSearchAlgorithmConfigurationUnionArrayDeserializer(
+  result: Array<VectorSearchAlgorithmConfigurationUnion>,
+): any[] {
+  return result.map((item) => {
+    return vectorSearchAlgorithmConfigurationUnionDeserializer(item);
+  });
+}
+
+/** Contains configuration options specific to the algorithm used during indexing or querying. */
+export interface VectorSearchAlgorithmConfiguration {
+  /** The name to associate with this particular configuration. */
+  name: string;
+  /** Type of VectorSearchAlgorithmConfiguration. */
+  /** The discriminator possible values: hnsw, exhaustiveKnn */
+  kind: VectorSearchAlgorithmKind;
+}
+
+export function vectorSearchAlgorithmConfigurationSerializer(
+  item: VectorSearchAlgorithmConfiguration,
+): any {
+  return { name: item["name"], kind: item["kind"] };
+}
+
+export function vectorSearchAlgorithmConfigurationDeserializer(
+  item: any,
+): VectorSearchAlgorithmConfiguration {
+  return {
+    name: item["name"],
+    kind: item["kind"],
+  };
+}
+
+/** Alias for VectorSearchAlgorithmConfigurationUnion */
+export type VectorSearchAlgorithmConfigurationUnion =
+  | HnswAlgorithmConfiguration
+  | ExhaustiveKnnAlgorithmConfiguration
+  | VectorSearchAlgorithmConfiguration;
+
+export function vectorSearchAlgorithmConfigurationUnionSerializer(
+  item: VectorSearchAlgorithmConfigurationUnion,
+): any {
+  switch (item.kind) {
+    case "hnsw":
+      return hnswAlgorithmConfigurationSerializer(item as HnswAlgorithmConfiguration);
+
+    case "exhaustiveKnn":
+      return exhaustiveKnnAlgorithmConfigurationSerializer(
+        item as ExhaustiveKnnAlgorithmConfiguration,
+      );
+
+    default:
+      return vectorSearchAlgorithmConfigurationSerializer(item);
+  }
+}
+
+export function vectorSearchAlgorithmConfigurationUnionDeserializer(
+  item: any,
+): VectorSearchAlgorithmConfigurationUnion {
+  switch (item.kind) {
+    case "hnsw":
+      return hnswAlgorithmConfigurationDeserializer(item as HnswAlgorithmConfiguration);
+
+    case "exhaustiveKnn":
+      return exhaustiveKnnAlgorithmConfigurationDeserializer(
+        item as ExhaustiveKnnAlgorithmConfiguration,
+      );
+
+    default:
+      return vectorSearchAlgorithmConfigurationDeserializer(item);
+  }
+}
+
+/** The algorithm used for indexing and querying. */
+export enum KnownVectorSearchAlgorithmKind {
+  /** HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */
+  Hnsw = "hnsw",
+  /** Exhaustive KNN algorithm which will perform brute-force search. */
+  ExhaustiveKnn = "exhaustiveKnn",
+}
+
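+// Example (illustrative sketch; the profile and configuration names are
+// assumptions): vectorSearchProfileSerializer above renames the client
+// properties algorithmConfigurationName, vectorizerName, and compressionName
+// to the wire properties algorithm, vectorizer, and compression.
+//
+//   const profile: VectorSearchProfile = {
+//     name: "demo-profile",
+//     algorithmConfigurationName: "demo-hnsw",
+//     vectorizerName: "demo-vectorizer",
+//   };
+//   const wire = vectorSearchProfileSerializer(profile);
+//   // wire = { name: "demo-profile", algorithm: "demo-hnsw",
+//   //          vectorizer: "demo-vectorizer", compression: undefined }
+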
+/**
+ * The algorithm used for indexing and querying. \
+ * {@link KnownVectorSearchAlgorithmKind} can be used interchangeably with VectorSearchAlgorithmKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **hnsw**: HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. \
+ * **exhaustiveKnn**: Exhaustive KNN algorithm which will perform brute-force search.
+ */
+export type VectorSearchAlgorithmKind = string;
+
+/** Contains configuration options specific to the HNSW approximate nearest neighbors algorithm used during indexing and querying. The HNSW algorithm offers a tunable trade-off between search speed and accuracy. */
+export interface HnswAlgorithmConfiguration extends VectorSearchAlgorithmConfiguration {
+  /** Contains the parameters specific to the HNSW algorithm. */
+  parameters?: HnswParameters;
+  /** The name of the kind of algorithm being configured for use with vector search. */
+  kind: "hnsw";
+}
+
+export function hnswAlgorithmConfigurationSerializer(item: HnswAlgorithmConfiguration): any {
+  return {
+    name: item["name"],
+    kind: item["kind"],
+    hnswParameters: !item["parameters"]
+      ? item["parameters"]
+      : hnswParametersSerializer(item["parameters"]),
+  };
+}
+
+export function hnswAlgorithmConfigurationDeserializer(item: any): HnswAlgorithmConfiguration {
+  return {
+    name: item["name"],
+    kind: item["kind"],
+    parameters: !item["hnswParameters"]
+      ? item["hnswParameters"]
+      : hnswParametersDeserializer(item["hnswParameters"]),
+  };
+}
+
+/** Contains the parameters specific to the HNSW algorithm. */
+export interface HnswParameters {
+  /** The number of bi-directional links created for every new element during construction. Increasing this parameter value may improve recall and reduce retrieval times for datasets with high intrinsic dimensionality at the expense of increased memory consumption and longer indexing time. */
+  m?: number;
+  /** The size of the dynamic list containing the nearest neighbors, which is used during index time. Increasing this parameter may improve index quality, at the expense of increased indexing time. At a certain point, increasing this parameter leads to diminishing returns. */
+  efConstruction?: number;
+  /** The size of the dynamic list containing the nearest neighbors, which is used during search time. Increasing this parameter may improve search results, at the expense of slower search. At a certain point, increasing this parameter leads to diminishing returns. */
+  efSearch?: number;
+  /** The similarity metric to use for vector comparisons. */
+  metric?: VectorSearchAlgorithmMetric;
+}
+
+export function hnswParametersSerializer(item: HnswParameters): any {
+  return {
+    m: item["m"],
+    efConstruction: item["efConstruction"],
+    efSearch: item["efSearch"],
+    metric: item["metric"],
+  };
+}
+
+export function hnswParametersDeserializer(item: any): HnswParameters {
+  return {
+    m: item["m"],
+    efConstruction: item["efConstruction"],
+    efSearch: item["efSearch"],
+    metric: item["metric"],
+  };
+}
+
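+// Example (illustrative sketch; the configuration name "demo-hnsw" is an
+// assumption): on the wire, HNSW parameters are nested under the
+// "hnswParameters" key, while the client model exposes them as "parameters".
+//
+//   const hnsw: HnswAlgorithmConfiguration = {
+//     name: "demo-hnsw",
+//     kind: "hnsw",
+//     parameters: { m: 4, efConstruction: 400, efSearch: 500, metric: "cosine" },
+//   };
+//   const wire = hnswAlgorithmConfigurationSerializer(hnsw);
+//   // wire.hnswParameters.m === 4
+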
+ +/** The similarity metric to use for vector comparisons. It is recommended to choose the same similarity metric as the embedding model was trained on. */ +export enum KnownVectorSearchAlgorithmMetric { + /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */ + Cosine = "cosine", + /** Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. */ + Euclidean = "euclidean", + /** Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. */ + DotProduct = "dotProduct", + /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */ + Hamming = "hamming", +} + +/** + * The similarity metric to use for vector comparisons. It is recommended to choose the same similarity metric as the embedding model was trained on. \ + * {@link KnownVectorSearchAlgorithmMetric} can be used interchangeably with VectorSearchAlgorithmMetric, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **cosine**: Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. \ + * **euclidean**: Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. \ + * **dotProduct**: Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. \ + * **hamming**: Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. + */ +export type VectorSearchAlgorithmMetric = string; + +/** Contains configuration options specific to the exhaustive KNN algorithm used during querying, which will perform brute-force search across the entire vector index. */ +export interface ExhaustiveKnnAlgorithmConfiguration extends VectorSearchAlgorithmConfiguration { + /** Contains the parameters specific to the exhaustive KNN algorithm. */ + parameters?: ExhaustiveKnnParameters; + /** The name of the kind of algorithm being configured for use with vector search. */ + kind: "exhaustiveKnn"; +} + +export function exhaustiveKnnAlgorithmConfigurationSerializer( + item: ExhaustiveKnnAlgorithmConfiguration, +): any { + return { + name: item["name"], + kind: item["kind"], + exhaustiveKnnParameters: !item["parameters"] + ? item["parameters"] + : exhaustiveKnnParametersSerializer(item["parameters"]), + }; +} + +export function exhaustiveKnnAlgorithmConfigurationDeserializer( + item: any, +): ExhaustiveKnnAlgorithmConfiguration { + return { + name: item["name"], + kind: item["kind"], + parameters: !item["exhaustiveKnnParameters"] + ? item["exhaustiveKnnParameters"] + : exhaustiveKnnParametersDeserializer(item["exhaustiveKnnParameters"]), + }; +} + +/** Contains the parameters specific to the exhaustive KNN algorithm. */ +export interface ExhaustiveKnnParameters { + /** The similarity metric to use for vector comparisons. */ + metric?: VectorSearchAlgorithmMetric; +} + +export function exhaustiveKnnParametersSerializer(item: ExhaustiveKnnParameters): any { + return { metric: item["metric"] }; +} + +export function exhaustiveKnnParametersDeserializer(item: any): ExhaustiveKnnParameters { + return { + metric: item["metric"], + }; +} + +export function vectorSearchVectorizerUnionArraySerializer( + result: Array<VectorSearchVectorizerUnion>, +): any[] { + return result.map((item) => { + return vectorSearchVectorizerUnionSerializer(item); + }); +} + +export function vectorSearchVectorizerUnionArrayDeserializer( + result: Array<VectorSearchVectorizerUnion>, +): any[] { + return result.map((item) => { + return vectorSearchVectorizerUnionDeserializer(item); + }); +} + +/** Specifies the vectorization method to be used during query time.
*/ +export interface VectorSearchVectorizer { + /** The name to associate with this particular vectorization method. */ + vectorizerName: string; + /** Type of VectorSearchVectorizer. */ + /** The discriminator possible values: azureOpenAI, customWebApi, aiServicesVision, aml */ + kind: VectorSearchVectorizerKind; +} + +export function vectorSearchVectorizerSerializer(item: VectorSearchVectorizer): any { + return { name: item["vectorizerName"], kind: item["kind"] }; +} + +export function vectorSearchVectorizerDeserializer(item: any): VectorSearchVectorizer { + return { + vectorizerName: item["name"], + kind: item["kind"], + }; +} + +/** Alias for VectorSearchVectorizerUnion */ +export type VectorSearchVectorizerUnion = + | AzureOpenAIVectorizer + | WebApiVectorizer + | AIServicesVisionVectorizer + | AzureMachineLearningVectorizer + | VectorSearchVectorizer; + +export function vectorSearchVectorizerUnionSerializer(item: VectorSearchVectorizerUnion): any { + switch (item.kind) { + case "azureOpenAI": + return azureOpenAIVectorizerSerializer(item as AzureOpenAIVectorizer); + + case "customWebApi": + return webApiVectorizerSerializer(item as WebApiVectorizer); + + case "aiServicesVision": + return aiServicesVisionVectorizerSerializer(item as AIServicesVisionVectorizer); + + case "aml": + return azureMachineLearningVectorizerSerializer(item as AzureMachineLearningVectorizer); + + default: + return vectorSearchVectorizerSerializer(item); + } +} + +export function vectorSearchVectorizerUnionDeserializer(item: any): VectorSearchVectorizerUnion { + switch (item.kind) { + case "azureOpenAI": + return azureOpenAIVectorizerDeserializer(item as AzureOpenAIVectorizer); + + case "customWebApi": + return webApiVectorizerDeserializer(item as WebApiVectorizer); + + case "aiServicesVision": + return aiServicesVisionVectorizerDeserializer(item as AIServicesVisionVectorizer); + + case "aml": + return azureMachineLearningVectorizerDeserializer(item as AzureMachineLearningVectorizer); + + default: + return vectorSearchVectorizerDeserializer(item); + } +} + +/** The vectorization method to be used during query time. */ +export enum KnownVectorSearchVectorizerKind { + /** Generate embeddings using an Azure OpenAI resource at query time. */ + AzureOpenAI = "azureOpenAI", + /** Generate embeddings using a custom web endpoint at query time. */ + CustomWebApi = "customWebApi", + /** Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. */ + AIServicesVision = "aiServicesVision", + /** Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog at query time. */ + AML = "aml", +} + +/** + * The vectorization method to be used during query time. \ + * {@link KnownVectorSearchVectorizerKind} can be used interchangeably with VectorSearchVectorizerKind, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \ + * **customWebApi**: Generate embeddings using a custom web endpoint at query time. \ + * **aiServicesVision**: Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. \ + * **aml**: Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog at query time. 
+ */ +export type VectorSearchVectorizerKind = string; + +/** Specifies the Azure OpenAI resource used to vectorize a query string. */ +export interface AzureOpenAIVectorizer extends VectorSearchVectorizer { + /** Contains the parameters specific to Azure OpenAI embedding vectorization. */ + parameters?: AzureOpenAIVectorizerParameters; + /** The name of the kind of vectorization method being configured for use with vector search. */ + kind: "azureOpenAI"; +} + +export function azureOpenAIVectorizerSerializer(item: AzureOpenAIVectorizer): any { + return { + name: item["vectorizerName"], + kind: item["kind"], + azureOpenAIParameters: !item["parameters"] + ? item["parameters"] + : azureOpenAIVectorizerParametersSerializer(item["parameters"]), + }; +} + +export function azureOpenAIVectorizerDeserializer(item: any): AzureOpenAIVectorizer { + return { + vectorizerName: item["name"], + kind: item["kind"], + parameters: !item["azureOpenAIParameters"] + ? item["azureOpenAIParameters"] + : azureOpenAIVectorizerParametersDeserializer(item["azureOpenAIParameters"]), + }; +} + +/** Specifies the parameters for connecting to the Azure OpenAI resource. */ +export interface AzureOpenAIVectorizerParameters { + /** The resource URI of the Azure OpenAI resource. */ + resourceUrl?: string; + /** ID of the Azure OpenAI model deployment on the designated resource. */ + deploymentName?: string; + /** API key of the designated Azure OpenAI resource. */ + apiKey?: string; + /** The user-assigned managed identity used for outbound connections. */ + authIdentity?: SearchIndexerDataIdentityUnion; + /** The name of the embedding model that is deployed at the provided deploymentId path. */ + modelName?: AzureOpenAIModelName; +} + +export function azureOpenAIVectorizerParametersSerializer( + item: AzureOpenAIVectorizerParameters, +): any { + return { + resourceUri: item["resourceUrl"], + deploymentId: item["deploymentName"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]), + modelName: item["modelName"], + }; +} + +export function azureOpenAIVectorizerParametersDeserializer( + item: any, +): AzureOpenAIVectorizerParameters { + return { + resourceUrl: item["resourceUri"], + deploymentName: item["deploymentId"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]), + modelName: item["modelName"], + }; +} + +/** The Azure Open AI model name that will be called. */ +export enum KnownAzureOpenAIModelName { + /** TextEmbeddingAda002 model. */ + TextEmbeddingAda002 = "text-embedding-ada-002", + /** TextEmbedding3Large model. */ + TextEmbedding3Large = "text-embedding-3-large", + /** TextEmbedding3Small model. */ + TextEmbedding3Small = "text-embedding-3-small", +} + +/** + * The Azure Open AI model name that will be called. \ + * {@link KnownAzureOpenAIModelName} can be used interchangeably with AzureOpenAIModelName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **text-embedding-ada-002**: TextEmbeddingAda002 model. \ + * **text-embedding-3-large**: TextEmbedding3Large model. \ + * **text-embedding-3-small**: TextEmbedding3Small model. + */ +export type AzureOpenAIModelName = string; + +/** Specifies a user-defined vectorizer for generating the vector embedding of a query string. 
Integration of an external vectorizer is achieved using the custom Web API interface of a skillset. */ +export interface WebApiVectorizer extends VectorSearchVectorizer { + /** Specifies the properties of the user-defined vectorizer. */ + webApiParameters?: WebApiVectorizerParameters; + /** The name of the kind of vectorization method being configured for use with vector search. */ + kind: "customWebApi"; +} + +export function webApiVectorizerSerializer(item: WebApiVectorizer): any { + return { + name: item["vectorizerName"], + kind: item["kind"], + customWebApiParameters: !item["webApiParameters"] + ? item["webApiParameters"] + : webApiVectorizerParametersSerializer(item["webApiParameters"]), + }; +} + +export function webApiVectorizerDeserializer(item: any): WebApiVectorizer { + return { + vectorizerName: item["name"], + kind: item["kind"], + webApiParameters: !item["customWebApiParameters"] + ? item["customWebApiParameters"] + : webApiVectorizerParametersDeserializer(item["customWebApiParameters"]), + }; +} + +/** Specifies the properties for connecting to a user-defined vectorizer. */ +export interface WebApiVectorizerParameters { + /** The URI of the Web API providing the vectorizer. */ + url?: string; + /** The headers required to make the HTTP request. */ + httpHeaders?: Record<string, string>; + /** The method for the HTTP request. */ + httpMethod?: string; + /** The desired timeout for the request. Default is 30 seconds. */ + timeout?: string; + /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */ + authResourceId?: string; + /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + authIdentity?: SearchIndexerDataIdentityUnion; +} + +export function webApiVectorizerParametersSerializer(item: WebApiVectorizerParameters): any { + return { + uri: item["url"], + httpHeaders: item["httpHeaders"], + httpMethod: item["httpMethod"], + timeout: item["timeout"], + authResourceId: item["authResourceId"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]), + }; +} + +export function webApiVectorizerParametersDeserializer(item: any): WebApiVectorizerParameters { + return { + url: item["uri"], + httpHeaders: item["httpHeaders"], + httpMethod: item["httpMethod"], + timeout: item["timeout"], + authResourceId: item["authResourceId"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]), + }; +} + +/** Specifies the Azure AI Services Vision resource used to generate embeddings for an image or text input at query time. */ +export interface AIServicesVisionVectorizer extends VectorSearchVectorizer { + /** Contains the parameters specific to AI Services Vision embedding vectorization.
*/ + aiServicesVisionParameters?: AIServicesVisionParameters; + /** The name of the kind of vectorization method being configured for use with vector search. */ + kind: "aiServicesVision"; +} + +export function aiServicesVisionVectorizerSerializer(item: AIServicesVisionVectorizer): any { + return { + name: item["vectorizerName"], + kind: item["kind"], + AIServicesVisionParameters: !item["aiServicesVisionParameters"] + ? item["aiServicesVisionParameters"] + : aiServicesVisionParametersSerializer(item["aiServicesVisionParameters"]), + }; +} + +export function aiServicesVisionVectorizerDeserializer(item: any): AIServicesVisionVectorizer { + return { + vectorizerName: item["name"], + kind: item["kind"], + aiServicesVisionParameters: !item["AIServicesVisionParameters"] + ? item["AIServicesVisionParameters"] + : aiServicesVisionParametersDeserializer(item["AIServicesVisionParameters"]), + }; +} + +/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */ +export interface AIServicesVisionParameters { + /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */ + modelVersion: string; + /** The resource URI of the AI Services resource. */ + resourceUri: string; + /** API key of the designated AI Services resource. */ + apiKey?: string; + /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + authIdentity?: SearchIndexerDataIdentityUnion; +} + +export function aiServicesVisionParametersSerializer(item: AIServicesVisionParameters): any { + return { + modelVersion: item["modelVersion"], + resourceUri: item["resourceUri"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]), + }; +} + +export function aiServicesVisionParametersDeserializer(item: any): AIServicesVisionParameters { + return { + modelVersion: item["modelVersion"], + resourceUri: item["resourceUri"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]), + }; +} + +/** Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog for generating the vector embedding of a query string. */ +export interface AzureMachineLearningVectorizer extends VectorSearchVectorizer { + /** Specifies the properties of the AML vectorizer. */ + amlParameters?: AzureMachineLearningParameters; + /** The name of the kind of vectorization method being configured for use with vector search. */ + kind: "aml"; +} + +export function azureMachineLearningVectorizerSerializer( + item: AzureMachineLearningVectorizer, +): any { + return { + name: item["vectorizerName"], + kind: item["kind"], + amlParameters: !item["amlParameters"] + ? item["amlParameters"] + : azureMachineLearningParametersSerializer(item["amlParameters"]), + }; +} + +export function azureMachineLearningVectorizerDeserializer( + item: any, +): AzureMachineLearningVectorizer { + return { + vectorizerName: item["name"], + kind: item["kind"], + amlParameters: !item["amlParameters"] + ? 
item["amlParameters"] + : azureMachineLearningParametersDeserializer(item["amlParameters"]), + }; +} + +/** Specifies the properties for connecting to an AML vectorizer. */ +export interface AzureMachineLearningParameters { + /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */ + scoringUri: string; + /** (Required for key authentication) The key for the AML service. */ + authenticationKey?: string; + /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */ + resourceId?: string; + /** (Optional) When specified, indicates the timeout for the http client making the API call. */ + timeout?: string; + /** (Optional for token authentication). The region the AML service is deployed in. */ + region?: string; + /** The name of the embedding model from the Azure AI Foundry Catalog that is deployed at the provided endpoint. */ + modelName?: AIFoundryModelCatalogName; +} + +export function azureMachineLearningParametersSerializer( + item: AzureMachineLearningParameters, +): any { + return { + uri: item["scoringUri"], + key: item["authenticationKey"], + resourceId: item["resourceId"], + timeout: item["timeout"], + region: item["region"], + modelName: item["modelName"], + }; +} + +export function azureMachineLearningParametersDeserializer( + item: any, +): AzureMachineLearningParameters { + return { + scoringUri: item["uri"], + authenticationKey: item["key"], + resourceId: item["resourceId"], + timeout: item["timeout"], + region: item["region"], + modelName: item["modelName"], + }; +} + +/** The name of the embedding model from the Azure AI Foundry Catalog that will be called. */ +export enum KnownAIFoundryModelCatalogName { + /** OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32 */ + OpenAiclipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", + /** OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336 */ + OpenAiclipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336", + /** Facebook-DinoV2-Image-Embeddings-ViT-Base */ + FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base", + /** Facebook-DinoV2-Image-Embeddings-ViT-Giant */ + FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant", + /** Cohere-embed-v3-english */ + CohereEmbedV3English = "Cohere-embed-v3-english", + /** Cohere-embed-v3-multilingual */ + CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual", +} + +/** + * The name of the embedding model from the Azure AI Foundry Catalog that will be called. \ + * {@link KnownAIFoundryModelCatalogName} can be used interchangeably with AIFoundryModelCatalogName, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service + * **OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32**: OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32 \ + * **OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336**: OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336 \ + * **Facebook-DinoV2-Image-Embeddings-ViT-Base**: Facebook-DinoV2-Image-Embeddings-ViT-Base \ + * **Facebook-DinoV2-Image-Embeddings-ViT-Giant**: Facebook-DinoV2-Image-Embeddings-ViT-Giant \ + * **Cohere-embed-v3-english**: Cohere-embed-v3-english \ + * **Cohere-embed-v3-multilingual**: Cohere-embed-v3-multilingual + */ +export type AIFoundryModelCatalogName = string; + +export function vectorSearchCompressionUnionArraySerializer( + result: Array<VectorSearchCompressionUnion>, +): any[] { + return result.map((item) => { + return vectorSearchCompressionUnionSerializer(item); + }); +} + +export function vectorSearchCompressionUnionArrayDeserializer( + result: Array<VectorSearchCompressionUnion>, +): any[] { + return result.map((item) => { + return vectorSearchCompressionUnionDeserializer(item); + }); +} + +/** Contains configuration options specific to the compression method used during indexing or querying. */ +export interface VectorSearchCompression { + /** The name to associate with this particular configuration. */ + compressionName: string; + /** If set to true, once the ordered set of results calculated using compressed vectors are obtained, they will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */ + rerankWithOriginalVectors?: boolean; + /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */ + defaultOversampling?: number; + /** Contains the options for rescoring. */ + rescoringOptions?: RescoringOptions; + /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should be only used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */ + truncationDimension?: number; + /** Type of VectorSearchCompression. */ + /** The discriminator possible values: scalarQuantization, binaryQuantization */ + kind: VectorSearchCompressionKind; +} + +export function vectorSearchCompressionSerializer(item: VectorSearchCompression): any { + return { + name: item["compressionName"], + rerankWithOriginalVectors: item["rerankWithOriginalVectors"], + defaultOversampling: item["defaultOversampling"], + rescoringOptions: !item["rescoringOptions"] + ? item["rescoringOptions"] + : rescoringOptionsSerializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + }; +} + +export function vectorSearchCompressionDeserializer(item: any): VectorSearchCompression { + return { + compressionName: item["name"], + rerankWithOriginalVectors: item["rerankWithOriginalVectors"], + defaultOversampling: item["defaultOversampling"], + rescoringOptions: !item["rescoringOptions"] + ?
item["rescoringOptions"] + : rescoringOptionsDeserializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + }; +} + +/** Alias for VectorSearchCompressionUnion */ +export type VectorSearchCompressionUnion = + | ScalarQuantizationCompression + | BinaryQuantizationCompression + | VectorSearchCompression; + +export function vectorSearchCompressionUnionSerializer(item: VectorSearchCompressionUnion): any { + switch (item.kind) { + case "scalarQuantization": + return scalarQuantizationCompressionSerializer(item as ScalarQuantizationCompression); + + case "binaryQuantization": + return binaryQuantizationCompressionSerializer(item as BinaryQuantizationCompression); + + default: + return vectorSearchCompressionSerializer(item); + } +} + +export function vectorSearchCompressionUnionDeserializer(item: any): VectorSearchCompressionUnion { + switch (item.kind) { + case "scalarQuantization": + return scalarQuantizationCompressionDeserializer(item as ScalarQuantizationCompression); + + case "binaryQuantization": + return binaryQuantizationCompressionDeserializer(item as BinaryQuantizationCompression); + + default: + return vectorSearchCompressionDeserializer(item); + } +} + +/** Contains the options for rescoring. */ +export interface RescoringOptions { + /** If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency. */ + enableRescoring?: boolean; + /** Default oversampling factor. Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values improve recall at the expense of latency. */ + defaultOversampling?: number; + /** Controls the storage method for original vectors. This setting is immutable. */ + rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod; +} + +export function rescoringOptionsSerializer(item: RescoringOptions): any { + return { + enableRescoring: item["enableRescoring"], + defaultOversampling: item["defaultOversampling"], + rescoreStorageMethod: item["rescoreStorageMethod"], + }; +} + +export function rescoringOptionsDeserializer(item: any): RescoringOptions { + return { + enableRescoring: item["enableRescoring"], + defaultOversampling: item["defaultOversampling"], + rescoreStorageMethod: item["rescoreStorageMethod"], + }; +} + +/** The storage method for the original full-precision vectors used for rescoring and internal index operations. */ +export enum KnownVectorSearchCompressionRescoreStorageMethod { + /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. */ + PreserveOriginals = "preserveOriginals", + /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */ + DiscardOriginals = "discardOriginals", +} + +/** + * The storage method for the original full-precision vectors used for rescoring and internal index operations. 
\ + * {@link KnownVectorSearchCompressionRescoreStorageMethod} can be used interchangeably with VectorSearchCompressionRescoreStorageMethod, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **preserveOriginals**: This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. \ + * **discardOriginals**: This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. + */ +export type VectorSearchCompressionRescoreStorageMethod = string; + +/** The compression method used for indexing and querying. */ +export enum KnownVectorSearchCompressionKind { + /** Scalar Quantization, a type of compression method. In scalar quantization, the original vector values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */ + ScalarQuantization = "scalarQuantization", + /** Binary Quantization, a type of compression method. In binary quantization, the original vector values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. */ + BinaryQuantization = "binaryQuantization", +} + +/** + * The compression method used for indexing and querying. \ + * {@link KnownVectorSearchCompressionKind} can be used interchangeably with VectorSearchCompressionKind, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vector values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. \ + * **binaryQuantization**: Binary Quantization, a type of compression method. In binary quantization, the original vector values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. + */ +export type VectorSearchCompressionKind = string; + +/** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */ +export interface ScalarQuantizationCompression extends VectorSearchCompression { + /** Contains the parameters specific to Scalar Quantization. */ + parameters?: ScalarQuantizationParameters; + /** The name of the kind of compression method being configured for use with vector search. */ + kind: "scalarQuantization"; +} + +export function scalarQuantizationCompressionSerializer(item: ScalarQuantizationCompression): any { + return { + name: item["compressionName"], + rerankWithOriginalVectors: item["rerankWithOriginalVectors"], + defaultOversampling: item["defaultOversampling"], + rescoringOptions: !item["rescoringOptions"] + ? item["rescoringOptions"] + : rescoringOptionsSerializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + scalarQuantizationParameters: !item["parameters"] + ?
item["parameters"] + : scalarQuantizationParametersSerializer(item["parameters"]), + }; +} + +export function scalarQuantizationCompressionDeserializer( + item: any, +): ScalarQuantizationCompression { + return { + compressionName: item["name"], + rerankWithOriginalVectors: item["rerankWithOriginalVectors"], + defaultOversampling: item["defaultOversampling"], + rescoringOptions: !item["rescoringOptions"] + ? item["rescoringOptions"] + : rescoringOptionsDeserializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + parameters: !item["scalarQuantizationParameters"] + ? item["scalarQuantizationParameters"] + : scalarQuantizationParametersDeserializer(item["scalarQuantizationParameters"]), + }; +} + +/** Contains the parameters specific to Scalar Quantization. */ +export interface ScalarQuantizationParameters { + /** The quantized data type of compressed vector values. */ + quantizedDataType?: VectorSearchCompressionTarget; +} + +export function scalarQuantizationParametersSerializer(item: ScalarQuantizationParameters): any { + return { quantizedDataType: item["quantizedDataType"] }; +} + +export function scalarQuantizationParametersDeserializer(item: any): ScalarQuantizationParameters { + return { + quantizedDataType: item["quantizedDataType"], + }; +} + +/** The quantized data type of compressed vector values. */ +export enum KnownVectorSearchCompressionTarget { + /** 8-bit signed integer. */ + Int8 = "int8", +} + +/** + * The quantized data type of compressed vector values. \ + * {@link KnownVectorSearchCompressionTarget} can be used interchangeably with VectorSearchCompressionTarget, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **int8**: 8-bit signed integer. + */ +export type VectorSearchCompressionTarget = string; + +/** Contains configuration options specific to the binary quantization compression method used during indexing and querying. */ +export interface BinaryQuantizationCompression extends VectorSearchCompression { + /** The name of the kind of compression method being configured for use with vector search. */ + kind: "binaryQuantization"; +} + +export function binaryQuantizationCompressionSerializer(item: BinaryQuantizationCompression): any { + return { + name: item["compressionName"], + rerankWithOriginalVectors: item["rerankWithOriginalVectors"], + defaultOversampling: item["defaultOversampling"], + rescoringOptions: !item["rescoringOptions"] + ? item["rescoringOptions"] + : rescoringOptionsSerializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + }; +} + +export function binaryQuantizationCompressionDeserializer( + item: any, +): BinaryQuantizationCompression { + return { + compressionName: item["name"], + rerankWithOriginalVectors: item["rerankWithOriginalVectors"], + defaultOversampling: item["defaultOversampling"], + rescoringOptions: !item["rescoringOptions"] + ? item["rescoringOptions"] + : rescoringOptionsDeserializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + }; +} + +/** A value indicating whether permission filtering is enabled for the index. */ +export enum KnownSearchIndexPermissionFilterOption { + /** enabled. */ + Enabled = "enabled", + /** disabled. */ + Disabled = "disabled", +} + +/** + * A value indicating whether permission filtering is enabled for the index. 
\ + * {@link KnownSearchIndexPermissionFilterOption} can be used interchangeably with SearchIndexPermissionFilterOption, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **enabled**: enabled. \ + * **disabled**: disabled. + */ +export type SearchIndexPermissionFilterOption = string; + +/** Response from a List Indexes request. If successful, it includes the full definitions of all indexes. */ +export interface _ListIndexesResult { + /** The indexes in the Search service. */ + readonly indexes: SearchIndex[]; +} + +export function _listIndexesResultDeserializer(item: any): _ListIndexesResult { + return { + indexes: searchIndexArrayDeserializer(item["value"]), + }; +} + +export function searchIndexArraySerializer(result: Array<SearchIndex>): any[] { + return result.map((item) => { + return searchIndexSerializer(item); + }); +} + +export function searchIndexArrayDeserializer(result: Array<SearchIndex>): any[] { + return result.map((item) => { + return searchIndexDeserializer(item); + }); +} + +/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */ +export interface GetIndexStatisticsResult { + /** The number of documents in the index. */ + documentCount: number; + /** The amount of storage in bytes consumed by the index. */ + storageSize: number; + /** The amount of memory in bytes consumed by vectors in the index. */ + vectorIndexSize: number; +} + +export function getIndexStatisticsResultDeserializer(item: any): GetIndexStatisticsResult { + return { + documentCount: item["documentCount"], + storageSize: item["storageSize"], + vectorIndexSize: item["vectorIndexSize"], + }; +} + +/** Specifies some text and analysis components used to break that text into tokens. */ +export interface AnalyzeTextOptions { + /** The text to break into tokens. */ + text: string; + /** The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. */ + analyzerName?: LexicalAnalyzerName; + /** The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. */ + tokenizerName?: LexicalTokenizerName; + /** The name of the normalizer to use to normalize the given text. */ + normalizerName?: LexicalNormalizerName; + /** An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ + tokenFilters?: TokenFilterName[]; + /** An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ + charFilters?: CharFilterName[]; +} + +export function analyzeTextOptionsSerializer(item: AnalyzeTextOptions): any { + return { + text: item["text"], + analyzer: item["analyzerName"], + tokenizer: item["tokenizerName"], + normalizer: item["normalizerName"], + tokenFilters: !item["tokenFilters"] + ? item["tokenFilters"] + : item["tokenFilters"].map((p: any) => { + return p; + }), + charFilters: !item["charFilters"] + ? item["charFilters"] + : item["charFilters"].map((p: any) => { + return p; + }), + }; +}
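+
+// Illustrative sketch (not part of the generated models): `analyzerName` and `tokenizerName`
+// are mutually exclusive, and the serializer renames the client-side properties to the
+// service's `analyzer`/`tokenizer` fields. The tokenizer and filter names below are
+// hypothetical choices for the example.
+//
+//   const options: AnalyzeTextOptions = {
+//     text: "The quick brown fox",
+//     tokenizerName: "standard_v2",
+//     tokenFilters: ["lowercase"], // only valid together with a tokenizer
+//   };
+//   analyzeTextOptionsSerializer(options);
+//   // wire shape (undefined fields drop out when JSON-encoded):
+//   // { text: "The quick brown fox", tokenizer: "standard_v2", tokenFilters: ["lowercase"] }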
+ +/** The result of testing an analyzer on text. */ +export interface AnalyzeResult { + /** The list of tokens returned by the analyzer specified in the request. */ + tokens: AnalyzedTokenInfo[]; +} + +export function analyzeResultDeserializer(item: any): AnalyzeResult { + return { + tokens: analyzedTokenInfoArrayDeserializer(item["tokens"]), + }; +} + +export function analyzedTokenInfoArraySerializer(result: Array<AnalyzedTokenInfo>): any[] { + return result.map((item) => { + return analyzedTokenInfoSerializer(item); + }); +} + +export function analyzedTokenInfoArrayDeserializer(result: Array<AnalyzedTokenInfo>): any[] { + return result.map((item) => { + return analyzedTokenInfoDeserializer(item); + }); +} + +/** Information about a token returned by an analyzer. */ +export interface AnalyzedTokenInfo { + /** The token returned by the analyzer. */ + token: string; + /** The index of the first character of the token in the input text. */ + startOffset: number; + /** The index of the last character of the token in the input text. */ + endOffset: number; + /** The position of the token in the input text relative to other tokens. The first token in the input text has position 0, the next has position 1, and so on. Depending on the analyzer used, some tokens might have the same position, for example if they are synonyms of each other. */ + position: number; +} + +export function analyzedTokenInfoSerializer(item: AnalyzedTokenInfo): any { + return { + token: item["token"], + startOffset: item["startOffset"], + endOffset: item["endOffset"], + position: item["position"], + }; +} + +export function analyzedTokenInfoDeserializer(item: any): AnalyzedTokenInfo { + return { + token: item["token"], + startOffset: item["startOffset"], + endOffset: item["endOffset"], + position: item["position"], + }; +} + +/** Represents an index alias, which describes a mapping from the alias name to an index. The alias name can be used in place of the index name for supported operations. */ +export interface SearchAlias { + /** The name of the alias. */ + name: string; + /** The name of the index this alias maps to. Only one index name may be specified. */ + indexes: string[]; + /** The ETag of the alias. */ + eTag?: string; +} + +export function searchAliasSerializer(item: SearchAlias): any { + return { + name: item["name"], + indexes: item["indexes"].map((p: any) => { + return p; + }), + "@odata.etag": item["eTag"], + }; +} + +export function searchAliasDeserializer(item: any): SearchAlias { + return { + name: item["name"], + indexes: item["indexes"].map((p: any) => { + return p; + }), + eTag: item["@odata.etag"], + }; +} + +/** Response from a List Aliases request. If successful, it includes the associated index mappings for all aliases. */ +export interface _ListAliasesResult { + /** The aliases in the Search service. */ + readonly aliases: SearchAlias[]; +} + +export function _listAliasesResultDeserializer(item: any): _ListAliasesResult { + return { + aliases: searchAliasArrayDeserializer(item["value"]), + }; +} + +export function searchAliasArraySerializer(result: Array<SearchAlias>): any[] { + return result.map((item) => { + return searchAliasSerializer(item); + }); +} + +export function searchAliasArrayDeserializer(result: Array<SearchAlias>): any[] { + return result.map((item) => { + return searchAliasDeserializer(item); + }); +}
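+
+// Illustrative sketch (not part of the generated models): the alias serializer maps the
+// client-side `eTag` property to the service's `@odata.etag` field, and the deserializer
+// maps it back. The alias and index names are hypothetical.
+//
+//   searchAliasSerializer({ name: "hotels-alias", indexes: ["hotels-v2"] });
+//   // => { name: "hotels-alias", indexes: ["hotels-v2"], "@odata.etag": undefined }
+//   searchAliasDeserializer({ name: "hotels-alias", indexes: ["hotels-v2"], "@odata.etag": "0x1" });
+//   // => { name: "hotels-alias", indexes: ["hotels-v2"], eTag: "0x1" }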
+ +/** Represents a knowledge base definition. */ +export interface KnowledgeBase { + /** The name of the knowledge base. */ + readonly name: string; + /** Knowledge sources referenced by this knowledge base. */ + knowledgeSources: KnowledgeSourceReference[]; + /** Contains configuration options on how to connect to AI models. */ + models?: KnowledgeBaseModelUnion[]; + /** The retrieval reasoning effort configuration. */ + retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion; + /** The output mode for the knowledge base. */ + outputMode?: KnowledgeRetrievalOutputMode; + /** The ETag of the knowledge base. */ + eTag?: string; + /** A description of an encryption key that you create in Azure Key Vault. */ + encryptionKey?: SearchResourceEncryptionKey; + /** The description of the knowledge base. */ + description?: string; + /** Instructions considered by the knowledge base when developing a query plan. */ + retrievalInstructions?: string; + /** Instructions considered by the knowledge base when generating answers. */ + answerInstructions?: string; +} + +export function knowledgeBaseSerializer(item: KnowledgeBase): any { + return { + knowledgeSources: knowledgeSourceReferenceArraySerializer(item["knowledgeSources"]), + models: !item["models"] + ? item["models"] + : knowledgeBaseModelUnionArraySerializer(item["models"]), + retrievalReasoningEffort: !item["retrievalReasoningEffort"] + ? item["retrievalReasoningEffort"] + : knowledgeRetrievalReasoningEffortUnionSerializer(item["retrievalReasoningEffort"]), + outputMode: item["outputMode"], + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + description: item["description"], + retrievalInstructions: item["retrievalInstructions"], + answerInstructions: item["answerInstructions"], + }; +} + +export function knowledgeBaseDeserializer(item: any): KnowledgeBase { + return { + name: item["name"], + knowledgeSources: knowledgeSourceReferenceArrayDeserializer(item["knowledgeSources"]), + models: !item["models"] + ? item["models"] + : knowledgeBaseModelUnionArrayDeserializer(item["models"]), + retrievalReasoningEffort: !item["retrievalReasoningEffort"] + ? item["retrievalReasoningEffort"] + : knowledgeRetrievalReasoningEffortUnionDeserializer(item["retrievalReasoningEffort"]), + outputMode: item["outputMode"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + description: item["description"], + retrievalInstructions: item["retrievalInstructions"], + answerInstructions: item["answerInstructions"], + }; +} + +export function knowledgeSourceReferenceArraySerializer( + result: Array<KnowledgeSourceReference>, +): any[] { + return result.map((item) => { + return knowledgeSourceReferenceSerializer(item); + }); +} + +export function knowledgeSourceReferenceArrayDeserializer( + result: Array<KnowledgeSourceReference>, +): any[] { + return result.map((item) => { + return knowledgeSourceReferenceDeserializer(item); + }); +}
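+
+// Illustrative note (not part of the generated models): `KnowledgeBase.name` is read-only,
+// so `knowledgeBaseSerializer` omits it from the payload (presumably because the name
+// travels in the request URL), while the deserializer reads it back from the response.
+// A minimal round trip with hypothetical names:
+//
+//   const kb: KnowledgeBase = {
+//     name: "travel-kb", // not emitted by the serializer
+//     knowledgeSources: [{ name: "hotels-source" }],
+//   };
+//   knowledgeBaseSerializer(kb);
+//   // => { knowledgeSources: [{ name: "hotels-source" }], ... } -- no top-level `name`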
+ +/** Reference to a knowledge source. */ +export interface KnowledgeSourceReference { + /** The name of the knowledge source. */ + name: string; +} + +export function knowledgeSourceReferenceSerializer(item: KnowledgeSourceReference): any { + return { name: item["name"] }; +} + +export function knowledgeSourceReferenceDeserializer(item: any): KnowledgeSourceReference { + return { + name: item["name"], + }; +} + +export function knowledgeBaseModelUnionArraySerializer( + result: Array<KnowledgeBaseModelUnion>, +): any[] { + return result.map((item) => { + return knowledgeBaseModelUnionSerializer(item); + }); +} + +export function knowledgeBaseModelUnionArrayDeserializer( + result: Array<KnowledgeBaseModelUnion>, +): any[] { + return result.map((item) => { + return knowledgeBaseModelUnionDeserializer(item); + }); +} + +/** Specifies the connection parameters for the model to use for query planning. */ +export interface KnowledgeBaseModel { + /** The AI model to be used for query planning. */ + /** The discriminator possible values: azureOpenAI */ + kind: KnowledgeBaseModelKind; +} + +export function knowledgeBaseModelSerializer(item: KnowledgeBaseModel): any { + return { kind: item["kind"] }; +} + +export function knowledgeBaseModelDeserializer(item: any): KnowledgeBaseModel { + return { + kind: item["kind"], + }; +} + +/** Alias for KnowledgeBaseModelUnion */ +export type KnowledgeBaseModelUnion = KnowledgeBaseAzureOpenAIModel | KnowledgeBaseModel; + +export function knowledgeBaseModelUnionSerializer(item: KnowledgeBaseModelUnion): any { + switch (item.kind) { + case "azureOpenAI": + return knowledgeBaseAzureOpenAIModelSerializer(item as KnowledgeBaseAzureOpenAIModel); + + default: + return knowledgeBaseModelSerializer(item); + } +} + +export function knowledgeBaseModelUnionDeserializer(item: any): KnowledgeBaseModelUnion { + switch (item.kind) { + case "azureOpenAI": + return knowledgeBaseAzureOpenAIModelDeserializer(item as KnowledgeBaseAzureOpenAIModel); + + default: + return knowledgeBaseModelDeserializer(item); + } +} + +/** The AI model to be used for query planning. */ +export enum KnownKnowledgeBaseModelKind { + /** Use Azure Open AI models for query planning. */ + AzureOpenAI = "azureOpenAI", +} + +/** + * The AI model to be used for query planning. \ + * {@link KnownKnowledgeBaseModelKind} can be used interchangeably with KnowledgeBaseModelKind, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **azureOpenAI**: Use Azure Open AI models for query planning. + */ +export type KnowledgeBaseModelKind = string; + +/** Specifies the Azure OpenAI resource used for query planning. */ +export interface KnowledgeBaseAzureOpenAIModel extends KnowledgeBaseModel { + kind: "azureOpenAI"; + /** Azure OpenAI parameters. */ + azureOpenAIParameters: AzureOpenAiParameters; +} + +export function knowledgeBaseAzureOpenAIModelSerializer(item: KnowledgeBaseAzureOpenAIModel): any { + return { + kind: item["kind"], + azureOpenAIParameters: azureOpenAiParametersSerializer(item["azureOpenAIParameters"]), + }; +} + +export function knowledgeBaseAzureOpenAIModelDeserializer( + item: any, +): KnowledgeBaseAzureOpenAIModel { + return { + kind: item["kind"], + azureOpenAIParameters: azureOpenAiParametersDeserializer(item["azureOpenAIParameters"]), + }; +}
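+
+// Illustrative sketch (not part of the generated models): the union serializers dispatch on
+// the `kind` discriminator and fall back to the base serializer for kinds this client does
+// not know yet. The resource URI and deployment below are hypothetical; `AzureOpenAiParameters`
+// is declared just after this point.
+//
+//   const model: KnowledgeBaseModelUnion = {
+//     kind: "azureOpenAI",
+//     azureOpenAIParameters: {
+//       resourceUri: "https://contoso.openai.azure.com",
+//       deploymentId: "gpt-4o",
+//     },
+//   };
+//   knowledgeBaseModelUnionSerializer(model); // routes to knowledgeBaseAzureOpenAIModelSerializer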
+ +/** Specifies the parameters for connecting to the Azure OpenAI resource. */ +export interface AzureOpenAiParameters { + /** The resource URI of the Azure OpenAI resource. */ + resourceUri: string; + /** ID of the Azure OpenAI model deployment on the designated resource. */ + deploymentId: string; + /** API key of the designated Azure OpenAI resource. */ + apiKey?: string; + /** The user-assigned managed identity used for outbound connections. */ + authIdentity?: string; + /** The name of the embedding model that is deployed at the provided deploymentId path. */ + modelName?: AzureOpenAIModelName; + /** The authentication method to use when connecting to the Azure OpenAI resource. */ + authenticationMethod?: string; +} + +export function azureOpenAiParametersSerializer(item: AzureOpenAiParameters): any { + return { + resourceUri: item["resourceUri"], + deploymentId: item["deploymentId"], + apiKey: item["apiKey"], + authIdentity: item["authIdentity"], + modelName: item["modelName"], + authenticationMethod: item["authenticationMethod"], + }; +} + +export function azureOpenAiParametersDeserializer(item: any): AzureOpenAiParameters { + return { + resourceUri: item["resourceUri"], + deploymentId: item["deploymentId"], + apiKey: item["apiKey"], + authIdentity: item["authIdentity"], + modelName: item["modelName"], + authenticationMethod: item["authenticationMethod"], + }; +} + +/** Result from listing knowledge bases. */ +export interface _ListKnowledgeBasesResult { + /** The knowledge bases in the service. */ + value: KnowledgeBase[]; +} + +export function _listKnowledgeBasesResultDeserializer(item: any): _ListKnowledgeBasesResult { + return { + value: knowledgeBaseArrayDeserializer(item["value"]), + }; +} + +export function knowledgeBaseArraySerializer(result: Array<KnowledgeBase>): any[] { + return result.map((item) => { + return knowledgeBaseSerializer(item); + }); +} + +export function knowledgeBaseArrayDeserializer(result: Array<KnowledgeBase>): any[] { + return result.map((item) => { + return knowledgeBaseDeserializer(item); + }); +} + +/** Represents a knowledge source definition. */ +export interface KnowledgeSource { + /** The name of the knowledge source. */ + readonly name: string; + /** Optional user-defined description. */ + description?: string; + /** The type of the knowledge source. */ + /** The discriminator possible values: searchIndex, azureBlob, indexedSharePoint, indexedOneLake, web, remoteSharePoint */ + kind: KnowledgeSourceKind; + /** The ETag of the knowledge source. */ + eTag?: string; + /** A description of an encryption key that you create in Azure Key Vault. */ + encryptionKey?: SearchResourceEncryptionKey; +} + +export function knowledgeSourceSerializer(item: KnowledgeSource): any { + return { + description: item["description"], + kind: item["kind"], + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + }; +}
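+
+// Illustrative note (not part of the generated models): the recurring
+// `!item["x"] ? item["x"] : f(item["x"])` pattern forwards `undefined` and `null` unchanged
+// instead of calling the nested serializer, so "property absent" and "property explicitly
+// null" both survive a round trip. For example (hypothetical source name):
+//
+//   knowledgeSourceSerializer({ name: "ks", kind: "searchIndex" });
+//   // => { description: undefined, kind: "searchIndex", "@odata.etag": undefined,
+//   //      encryptionKey: undefined } -- undefined fields drop out when JSON-encoded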
+ +export function knowledgeSourceDeserializer(item: any): KnowledgeSource { + return { + name: item["name"], + description: item["description"], + kind: item["kind"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + }; +} + +/** Alias for KnowledgeSourceUnion */ +export type KnowledgeSourceUnion = + | SearchIndexKnowledgeSource + | AzureBlobKnowledgeSource + | IndexedSharePointKnowledgeSource + | IndexedOneLakeKnowledgeSource + | WebKnowledgeSource + | RemoteSharePointKnowledgeSource + | KnowledgeSource; + +export function knowledgeSourceUnionSerializer(item: KnowledgeSourceUnion): any { + switch (item.kind) { + case "searchIndex": + return searchIndexKnowledgeSourceSerializer(item as SearchIndexKnowledgeSource); + + case "azureBlob": + return azureBlobKnowledgeSourceSerializer(item as AzureBlobKnowledgeSource); + + case "indexedSharePoint": + return indexedSharePointKnowledgeSourceSerializer(item as IndexedSharePointKnowledgeSource); + + case "indexedOneLake": + return indexedOneLakeKnowledgeSourceSerializer(item as IndexedOneLakeKnowledgeSource); + + case "web": + return webKnowledgeSourceSerializer(item as WebKnowledgeSource); + + case "remoteSharePoint": + return remoteSharePointKnowledgeSourceSerializer(item as RemoteSharePointKnowledgeSource); + + default: + return knowledgeSourceSerializer(item); + } +} + +export function knowledgeSourceUnionDeserializer(item: any): KnowledgeSourceUnion { + switch (item.kind) { + case "searchIndex": + return searchIndexKnowledgeSourceDeserializer(item as SearchIndexKnowledgeSource); + + case "azureBlob": + return azureBlobKnowledgeSourceDeserializer(item as AzureBlobKnowledgeSource); + + case "indexedSharePoint": + return indexedSharePointKnowledgeSourceDeserializer(item as IndexedSharePointKnowledgeSource); + + case "indexedOneLake": + return indexedOneLakeKnowledgeSourceDeserializer(item as IndexedOneLakeKnowledgeSource); + + case "web": + return webKnowledgeSourceDeserializer(item as WebKnowledgeSource); + + case "remoteSharePoint": + return remoteSharePointKnowledgeSourceDeserializer(item as RemoteSharePointKnowledgeSource); + + default: + return knowledgeSourceDeserializer(item); + } +} + +/** The kind of the knowledge source. */ +export enum KnownKnowledgeSourceKind { + /** A knowledge source that reads data from a Search Index. */ + SearchIndex = "searchIndex", + /** A knowledge source that reads and ingests data from Azure Blob Storage to a Search Index. */ + AzureBlob = "azureBlob", + /** A knowledge source that reads data from indexed SharePoint. */ + IndexedSharePoint = "indexedSharePoint", + /** A knowledge source that reads data from indexed OneLake. */ + IndexedOneLake = "indexedOneLake", + /** A knowledge source that reads data from the web. */ + Web = "web", + /** A knowledge source that reads data from remote SharePoint. */ + RemoteSharePoint = "remoteSharePoint", +} + +/** + * The kind of the knowledge source. \ + * {@link KnownKnowledgeSourceKind} can be used interchangeably with KnowledgeSourceKind, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **searchIndex**: A knowledge source that reads data from a Search Index. \ + * **azureBlob**: A knowledge source that reads and ingests data from Azure Blob Storage to a Search Index. \ + * **indexedSharePoint**: A knowledge source that reads data from indexed SharePoint. \ + * **indexedOneLake**: A knowledge source that reads data from indexed OneLake. \ + * **web**: A knowledge source that reads data from the web. \ + * **remoteSharePoint**: A knowledge source that reads data from remote SharePoint. + */ +export type KnowledgeSourceKind = string;
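+
+// Illustrative sketch (not part of the generated models): authoring the `searchIndex` kind
+// (declared just below) and letting the union serializer route it by discriminator. The
+// source and index names are hypothetical.
+//
+//   const source: SearchIndexKnowledgeSource = {
+//     name: "hotels-source",
+//     kind: "searchIndex",
+//     searchIndexParameters: { searchIndexName: "hotels-v2" },
+//   };
+//   knowledgeSourceUnionSerializer(source); // routes to searchIndexKnowledgeSourceSerializer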
+ +/** Knowledge Source targeting a search index. */ +export interface SearchIndexKnowledgeSource extends KnowledgeSource { + kind: "searchIndex"; + /** The parameters for the knowledge source. */ + searchIndexParameters: SearchIndexKnowledgeSourceParameters; +} + +export function searchIndexKnowledgeSourceSerializer(item: SearchIndexKnowledgeSource): any { + return { + description: item["description"], + kind: item["kind"], + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + searchIndexParameters: searchIndexKnowledgeSourceParametersSerializer( + item["searchIndexParameters"], + ), + }; +} + +export function searchIndexKnowledgeSourceDeserializer(item: any): SearchIndexKnowledgeSource { + return { + name: item["name"], + description: item["description"], + kind: item["kind"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + searchIndexParameters: searchIndexKnowledgeSourceParametersDeserializer( + item["searchIndexParameters"], + ), + }; +} + +/** Parameters for search index knowledge source. */ +export interface SearchIndexKnowledgeSourceParameters { + /** The name of the Search index. */ + searchIndexName: string; + /** Used to request additional fields for referenced source data. */ + sourceDataSelect?: string; +} + +export function searchIndexKnowledgeSourceParametersSerializer( + item: SearchIndexKnowledgeSourceParameters, +): any { + return { + searchIndexName: item["searchIndexName"], + sourceDataSelect: item["sourceDataSelect"], + }; +} + +export function searchIndexKnowledgeSourceParametersDeserializer( + item: any, +): SearchIndexKnowledgeSourceParameters { + return { + searchIndexName: item["searchIndexName"], + sourceDataSelect: item["sourceDataSelect"], + }; +} + +/** Configuration for Azure Blob Storage knowledge source. */ +export interface AzureBlobKnowledgeSource extends KnowledgeSource { + kind: "azureBlob"; + /** The parameters for the knowledge source. */ + azureBlobParameters: AzureBlobKnowledgeSourceParameters; +} + +export function azureBlobKnowledgeSourceSerializer(item: AzureBlobKnowledgeSource): any { + return { + description: item["description"], + kind: item["kind"], + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + azureBlobParameters: azureBlobKnowledgeSourceParametersSerializer(item["azureBlobParameters"]), + }; +} + +export function azureBlobKnowledgeSourceDeserializer(item: any): AzureBlobKnowledgeSource { + return { + name: item["name"], + description: item["description"], + kind: item["kind"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + azureBlobParameters: azureBlobKnowledgeSourceParametersDeserializer( + item["azureBlobParameters"], + ), + }; +} + +/** Parameters for Azure Blob Storage knowledge source. */ +export interface AzureBlobKnowledgeSourceParameters { + /** An explicit identity to use for this knowledge source. */ + identity?: SearchIndexerDataIdentityUnion; + /** Key-based connection string or the ResourceId format if using a managed identity. */ + connectionString: string; + /** The name of the blob storage container.
*/ + containerName: string; + /** Optional folder path within the container. */ + folderPath?: string; + /** Optional vectorizer configuration for vectorizing content. */ + embeddingModel?: VectorSearchVectorizerUnion; + /** Optional chat completion model for image verbalization or context extraction. */ + chatCompletionModel?: KnowledgeBaseModelUnion; + /** Optional schedule for data ingestion. */ + ingestionSchedule?: IndexingSchedule; + /** Resources created by the knowledge source. */ + readonly createdResources?: CreatedResources; + /** Indicates whether image verbalization should be disabled. */ + disableImageVerbalization?: boolean; +} + +export function azureBlobKnowledgeSourceParametersSerializer( + item: AzureBlobKnowledgeSourceParameters, +): any { + return { + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + connectionString: item["connectionString"], + containerName: item["containerName"], + folderPath: item["folderPath"], + embeddingModel: !item["embeddingModel"] + ? item["embeddingModel"] + : vectorSearchVectorizerUnionSerializer(item["embeddingModel"]), + chatCompletionModel: !item["chatCompletionModel"] + ? item["chatCompletionModel"] + : knowledgeBaseModelUnionSerializer(item["chatCompletionModel"]), + ingestionSchedule: !item["ingestionSchedule"] + ? item["ingestionSchedule"] + : indexingScheduleSerializer(item["ingestionSchedule"]), + disableImageVerbalization: item["disableImageVerbalization"], + }; +} + +export function azureBlobKnowledgeSourceParametersDeserializer( + item: any, +): AzureBlobKnowledgeSourceParameters { + return { + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + connectionString: item["connectionString"], + containerName: item["containerName"], + folderPath: item["folderPath"], + embeddingModel: !item["embeddingModel"] + ? item["embeddingModel"] + : vectorSearchVectorizerUnionDeserializer(item["embeddingModel"]), + chatCompletionModel: !item["chatCompletionModel"] + ? item["chatCompletionModel"] + : knowledgeBaseModelUnionDeserializer(item["chatCompletionModel"]), + ingestionSchedule: !item["ingestionSchedule"] + ? item["ingestionSchedule"] + : indexingScheduleDeserializer(item["ingestionSchedule"]), + createdResources: !item["createdResources"] + ? item["createdResources"] + : createdResourcesDeserializer(item["createdResources"]), + disableImageVerbalization: item["disableImageVerbalization"], + }; +} + +/** Represents a schedule for indexer execution. */ +export interface IndexingSchedule { + /** The interval of time between indexer executions. */ + interval: string; + /** The time when an indexer should start running. */ + startTime?: Date; +} + +export function indexingScheduleSerializer(item: IndexingSchedule): any { + return { + interval: item["interval"], + startTime: !item["startTime"] ? item["startTime"] : item["startTime"].toISOString(), + }; +} + +export function indexingScheduleDeserializer(item: any): IndexingSchedule { + return { + interval: item["interval"], + startTime: !item["startTime"] ? item["startTime"] : new Date(item["startTime"]), + }; +} + +/** Resources created by the knowledge source. Keys represent resource types (e.g., 'datasource', 'indexer', 'skillset', 'index') and values represent resource names. 
*/ +export interface CreatedResources { + /** Additional properties */ + additionalProperties?: Record; +} + +export function createdResourcesSerializer(item: CreatedResources): any { + return { ...serializeRecord(item.additionalProperties ?? {}) }; +} + +export function createdResourcesDeserializer(item: any): CreatedResources { + return { + additionalProperties: serializeRecord(item, []), + }; +} + +/** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */ +export enum KnownBlobIndexerDataToExtract { + /** Indexes just the standard blob properties and user-specified metadata. */ + StorageMetadata = "storageMetadata", + /** Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). */ + AllMetadata = "allMetadata", + /** Extracts all metadata and textual content from each blob. */ + ContentAndMetadata = "contentAndMetadata", +} + +/** + * Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. \ + * {@link KnownBlobIndexerDataToExtract} can be used interchangeably with BlobIndexerDataToExtract, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **storageMetadata**: Indexes just the standard blob properties and user-specified metadata. \ + * **allMetadata**: Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). \ + * **contentAndMetadata**: Extracts all metadata and textual content from each blob. + */ +export type BlobIndexerDataToExtract = string; + +/** Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. */ +export enum KnownBlobIndexerImageAction { + /** Ignores embedded images or image files in the data set. This is the default. */ + None = "none", + /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. */ + GenerateNormalizedImages = "generateNormalizedImages", + /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set. 
*/ + GenerateNormalizedImagePerPage = "generateNormalizedImagePerPage", +} + +/** + * Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. \ + * {@link KnownBlobIndexerImageAction} can be used interchangeably with BlobIndexerImageAction, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **none**: Ignores embedded images or image files in the data set. This is the default. \ + * **generateNormalizedImages**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. \ + * **generateNormalizedImagePerPage**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set. + */ +export type BlobIndexerImageAction = string; + +/** Represents the parsing mode for indexing from an Azure blob data source. */ +export enum KnownBlobIndexerParsingMode { + /** Set to default for normal file processing. */ + Default = "default", + /** Set to text to improve indexing performance on plain text files in blob storage. */ + Text = "text", + /** Set to delimitedText when blobs are plain CSV files. */ + DelimitedText = "delimitedText", + /** Set to json to extract structured content from JSON files. */ + Json = "json", + /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */ + JsonArray = "jsonArray", + /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */ + JsonLines = "jsonLines", + /** Set to markdown to extract content from markdown files. */ + Markdown = "markdown", +} + +/** + * Represents the parsing mode for indexing from an Azure blob data source. \ + * {@link KnownBlobIndexerParsingMode} can be used interchangeably with BlobIndexerParsingMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **default**: Set to default for normal file processing. \ + * **text**: Set to text to improve indexing performance on plain text files in blob storage. \ + * **delimitedText**: Set to delimitedText when blobs are plain CSV files. \ + * **json**: Set to json to extract structured content from JSON files. \ + * **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents. \ + * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. \ + * **markdown**: Set to markdown to extract content from markdown files. + */ +export type BlobIndexerParsingMode = string; + +/** Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. 
*/ +export enum KnownMarkdownHeaderDepth { + /** Indicates that headers up to a level of h1 will be considered while grouping markdown content. */ + H1 = "h1", + /** Indicates that headers up to a level of h2 will be considered while grouping markdown content. */ + H2 = "h2", + /** Indicates that headers up to a level of h3 will be considered while grouping markdown content. */ + H3 = "h3", + /** Indicates that headers up to a level of h4 will be considered while grouping markdown content. */ + H4 = "h4", + /** Indicates that headers up to a level of h5 will be considered while grouping markdown content. */ + H5 = "h5", + /** Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. */ + H6 = "h6", +} + +/** + * Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. \ + * {@link KnownMarkdownHeaderDepth} can be used interchangeably with MarkdownHeaderDepth, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **h1**: Indicates that headers up to a level of h1 will be considered while grouping markdown content. \ + * **h2**: Indicates that headers up to a level of h2 will be considered while grouping markdown content. \ + * **h3**: Indicates that headers up to a level of h3 will be considered while grouping markdown content. \ + * **h4**: Indicates that headers up to a level of h4 will be considered while grouping markdown content. \ + * **h5**: Indicates that headers up to a level of h5 will be considered while grouping markdown content. \ + * **h6**: Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. + */ +export type MarkdownHeaderDepth = string; + +/** Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. */ +export enum KnownMarkdownParsingSubmode { + /** Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. */ + OneToMany = "oneToMany", + /** Indicates that each markdown file will be parsed into a single search document. */ + OneToOne = "oneToOne", +} + +/** + * Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. \ + * {@link KnownMarkdownParsingSubmode} can be used interchangeably with MarkdownParsingSubmode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **oneToMany**: Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. \ + * **oneToOne**: Indicates that each markdown file will be parsed into a single search document. + */ +export type MarkdownParsingSubmode = string; + +/** Determines algorithm for text extraction from PDF files in Azure blob storage. */ +export enum KnownBlobIndexerPDFTextRotationAlgorithm { + /** Leverages normal text extraction. This is the default. */ + None = "none", + /** May produce better and more readable text extraction from PDF files that have rotated text within them. 
Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply. */ + DetectAngles = "detectAngles", +} + +/** + * Determines algorithm for text extraction from PDF files in Azure blob storage. \ + * {@link KnownBlobIndexerPDFTextRotationAlgorithm} can be used interchangeably with BlobIndexerPDFTextRotationAlgorithm, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **none**: Leverages normal text extraction. This is the default. \ + * **detectAngles**: May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply. + */ +export type BlobIndexerPDFTextRotationAlgorithm = string; + +/** Result from listing knowledge sources. */ +export interface _ListKnowledgeSourcesResult { + /** The knowledge sources in the service. */ + value: KnowledgeSourceUnion[]; +} + +export function _listKnowledgeSourcesResultDeserializer(item: any): _ListKnowledgeSourcesResult { + return { + value: knowledgeSourceUnionArrayDeserializer(item["value"]), + }; +} + +export function knowledgeSourceUnionArraySerializer(result: Array): any[] { + return result.map((item) => { + return knowledgeSourceUnionSerializer(item); + }); +} + +export function knowledgeSourceUnionArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return knowledgeSourceUnionDeserializer(item); + }); +} + +/** Response from a get service statistics request. If successful, it includes service level counters and limits. */ +export interface SearchServiceStatistics { + /** Service level resource counters. */ + counters: ServiceCounters; + /** Service level general limits. */ + limits: ServiceLimits; + /** Service level indexer runtime consumption. */ + indexersRuntime?: ServiceIndexersRuntime; +} + +export function searchServiceStatisticsDeserializer(item: any): SearchServiceStatistics { + return { + counters: serviceCountersDeserializer(item["counters"]), + limits: serviceLimitsDeserializer(item["limits"]), + indexersRuntime: !item["indexersRuntime"] + ? item["indexersRuntime"] + : serviceIndexersRuntimeDeserializer(item["indexersRuntime"]), + }; +} + +/** Represents service-level resource counters and quotas. */ +export interface ServiceCounters { + /** Total number of aliases. */ + aliasCounter: ResourceCounter; + /** Total number of documents across all indexes in the service. */ + documentCounter: ResourceCounter; + /** Total number of indexes. */ + indexCounter: ResourceCounter; + /** Total number of indexers. */ + indexerCounter: ResourceCounter; + /** Total number of data sources. */ + dataSourceCounter: ResourceCounter; + /** Total size of used storage in bytes. */ + storageSizeCounter: ResourceCounter; + /** Total number of synonym maps. */ + synonymMapCounter: ResourceCounter; + /** Total number of skillsets. */ + skillsetCounter: ResourceCounter; + /** Total memory consumption of all vector indexes within the service, in bytes. 
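+
+// Editor's sketch (not generated code): list responses arrive as { value: [...] };
+// each element is dispatched by `kind` through knowledgeSourceUnionDeserializer.
+// The payload below is hypothetical.
+export function exampleDeserializeKnowledgeSourceList(): _ListKnowledgeSourcesResult {
+  return _listKnowledgeSourcesResultDeserializer({
+    value: [
+      {
+        name: "hotels-ks",
+        kind: "searchIndex",
+        searchIndexParameters: { searchIndexName: "hotels-index" },
+      },
+    ],
+  });
+}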
+
+/** Represents service-level resource counters and quotas. */
+export interface ServiceCounters {
+  /** Total number of aliases. */
+  aliasCounter: ResourceCounter;
+  /** Total number of documents across all indexes in the service. */
+  documentCounter: ResourceCounter;
+  /** Total number of indexes. */
+  indexCounter: ResourceCounter;
+  /** Total number of indexers. */
+  indexerCounter: ResourceCounter;
+  /** Total number of data sources. */
+  dataSourceCounter: ResourceCounter;
+  /** Total size of used storage in bytes. */
+  storageSizeCounter: ResourceCounter;
+  /** Total number of synonym maps. */
+  synonymMapCounter: ResourceCounter;
+  /** Total number of skillsets. */
+  skillsetCounter: ResourceCounter;
+  /** Total memory consumption of all vector indexes within the service, in bytes. */
+  vectorIndexSizeCounter: ResourceCounter;
+}
+
+export function serviceCountersDeserializer(item: any): ServiceCounters {
+  return {
+    aliasCounter: resourceCounterDeserializer(item["aliasesCount"]),
+    documentCounter: resourceCounterDeserializer(item["documentCount"]),
+    indexCounter: resourceCounterDeserializer(item["indexesCount"]),
+    indexerCounter: resourceCounterDeserializer(item["indexersCount"]),
+    dataSourceCounter: resourceCounterDeserializer(item["dataSourcesCount"]),
+    storageSizeCounter: resourceCounterDeserializer(item["storageSize"]),
+    synonymMapCounter: resourceCounterDeserializer(item["synonymMaps"]),
+    skillsetCounter: resourceCounterDeserializer(item["skillsetCount"]),
+    vectorIndexSizeCounter: resourceCounterDeserializer(item["vectorIndexSize"]),
+  };
+}
+
+/** Represents a resource's usage and quota. */
+export interface ResourceCounter {
+  /** The resource usage amount. */
+  usage: number;
+  /** The resource amount quota. */
+  quota?: number;
+}
+
+export function resourceCounterDeserializer(item: any): ResourceCounter {
+  return {
+    usage: item["usage"],
+    quota: item["quota"],
+  };
+}
+
+/** Represents various service level limits. */
+export interface ServiceLimits {
+  /** The maximum allowed fields per index. */
+  maxFieldsPerIndex?: number;
+  /** The maximum depth which you can nest sub-fields in an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. */
+  maxFieldNestingDepthPerIndex?: number;
+  /** The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. */
+  maxComplexCollectionFieldsPerIndex?: number;
+  /** The maximum number of objects in complex collections allowed per document. */
+  maxComplexObjectsInCollectionsPerDocument?: number;
+  /** The maximum amount of storage in bytes allowed per index. */
+  maxStoragePerIndexInBytes?: number;
+  /** The maximum cumulative indexer runtime in seconds allowed for the service. */
+  maxCumulativeIndexerRuntimeSeconds?: number;
+}
+
+export function serviceLimitsDeserializer(item: any): ServiceLimits {
+  return {
+    maxFieldsPerIndex: item["maxFieldsPerIndex"],
+    maxFieldNestingDepthPerIndex: item["maxFieldNestingDepthPerIndex"],
+    maxComplexCollectionFieldsPerIndex: item["maxComplexCollectionFieldsPerIndex"],
+    maxComplexObjectsInCollectionsPerDocument: item["maxComplexObjectsInCollectionsPerDocument"],
+    maxStoragePerIndexInBytes: item["maxStoragePerIndex"],
+    maxCumulativeIndexerRuntimeSeconds: item["maxCumulativeIndexerRuntimeSeconds"],
+  };
+}
+
+/** Response from a request to retrieve stats summary of all indexes. If successful, it includes the stats of each index in the service. */
+export interface _ListIndexStatsSummary {
+  /** The statistics summary of all indexes in the Search service. */
+  readonly indexesStatistics: IndexStatisticsSummary[];
+}
+
+export function _listIndexStatsSummaryDeserializer(item: any): _ListIndexStatsSummary {
+  return {
+    indexesStatistics: indexStatisticsSummaryArrayDeserializer(item["value"]),
+  };
+}
+
+export function indexStatisticsSummaryArrayDeserializer(
+  result: Array<IndexStatisticsSummary>,
+): any[] {
+  return result.map((item) => {
+    return indexStatisticsSummaryDeserializer(item);
+  });
+}
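+
+// Editor's sketch (not generated code): `quota` is optional on the wire; a
+// missing quota is commonly treated as "no limit". The values are hypothetical.
+export function exampleRemainingQuota(): number {
+  const counter = resourceCounterDeserializer({ usage: 42, quota: 1000 });
+  return counter.quota !== undefined ? counter.quota - counter.usage : Infinity;
+}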
+
+/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */
+export interface IndexStatisticsSummary {
+  /** The name of the index. */
+  readonly name: string;
+  /** The number of documents in the index. */
+  readonly documentCount: number;
+  /** The amount of storage in bytes consumed by the index. */
+  readonly storageSize: number;
+  /** The amount of memory in bytes consumed by vectors in the index. */
+  readonly vectorIndexSize?: number;
+}
+
+export function indexStatisticsSummaryDeserializer(item: any): IndexStatisticsSummary {
+  return {
+    name: item["name"],
+    documentCount: item["documentCount"],
+    storageSize: item["storageSize"],
+    vectorIndexSize: item["vectorIndexSize"],
+  };
+}
+
+/** Represents a datasource definition, which can be used to configure an indexer. */
+export interface SearchIndexerDataSourceConnection {
+  /** The name of the datasource. */
+  name: string;
+  /** The description of the datasource. */
+  description?: string;
+  /** The type of the datasource. */
+  type: SearchIndexerDataSourceType;
+  /** A specific type of the data source, in case the resource is capable of different modalities. For example, 'MongoDb' for certain 'cosmosDb' accounts. */
+  subType?: string;
+  /** Credentials for the datasource. */
+  credentials: DataSourceCredentials;
+  /** The data container for the datasource. */
+  container: SearchIndexerDataContainer;
+  /** An explicit managed identity to use for this datasource. If not specified and the connection string is a managed identity, the system-assigned managed identity is used. If not specified, the value remains unchanged. If "none" is specified, the value of this property is cleared. */
+  identity?: SearchIndexerDataIdentityUnion;
+  /** Ingestion options with various types of permission data. */
+  indexerPermissionOptions?: IndexerPermissionOption[];
+  /** The data change detection policy for the datasource. */
+  dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion;
+  /** The data deletion detection policy for the datasource. */
+  dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion;
+  /** The ETag of the data source. */
+  eTag?: string;
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even Microsoft, can decrypt your data source definition. Once you have encrypted your data source definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your datasource definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+}
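+
+// Editor's sketch (not generated code): a minimal datasource definition with
+// high-water-mark change detection. All names and the placeholder connection
+// string below are hypothetical.
+export function exampleDataSourceConnection(): SearchIndexerDataSourceConnection {
+  return {
+    name: "hotels-ds",
+    type: "azuresql",
+    credentials: { connectionString: "<your-connection-string>" },
+    container: { name: "Hotels" },
+    dataChangeDetectionPolicy: {
+      odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
+      highWaterMarkColumnName: "lastModified",
+    },
+  };
+}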
item["dataChangeDetectionPolicy"] + : dataChangeDetectionPolicyUnionSerializer(item["dataChangeDetectionPolicy"]), + dataDeletionDetectionPolicy: !item["dataDeletionDetectionPolicy"] + ? item["dataDeletionDetectionPolicy"] + : dataDeletionDetectionPolicyUnionSerializer(item["dataDeletionDetectionPolicy"]), + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + }; +} + +export function searchIndexerDataSourceConnectionDeserializer( + item: any, +): SearchIndexerDataSourceConnection { + return { + name: item["name"], + description: item["description"], + type: item["type"], + subType: item["subType"], + credentials: dataSourceCredentialsDeserializer(item["credentials"]), + container: searchIndexerDataContainerDeserializer(item["container"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + indexerPermissionOptions: !item["indexerPermissionOptions"] + ? item["indexerPermissionOptions"] + : item["indexerPermissionOptions"].map((p: any) => { + return p; + }), + dataChangeDetectionPolicy: !item["dataChangeDetectionPolicy"] + ? item["dataChangeDetectionPolicy"] + : dataChangeDetectionPolicyUnionDeserializer(item["dataChangeDetectionPolicy"]), + dataDeletionDetectionPolicy: !item["dataDeletionDetectionPolicy"] + ? item["dataDeletionDetectionPolicy"] + : dataDeletionDetectionPolicyUnionDeserializer(item["dataDeletionDetectionPolicy"]), + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + }; +} + +/** Defines the type of a datasource. */ +export enum KnownSearchIndexerDataSourceType { + /** Indicates an Azure SQL datasource. */ + AzureSql = "azuresql", + /** Indicates a CosmosDB datasource. */ + CosmosDb = "cosmosdb", + /** Indicates an Azure Blob datasource. */ + AzureBlob = "azureblob", + /** Indicates an Azure Table datasource. */ + AzureTable = "azuretable", + /** Indicates a MySql datasource. */ + MySql = "mysql", + /** Indicates an ADLS Gen2 datasource. */ + AdlsGen2 = "adlsgen2", + /** Indicates a Microsoft Fabric OneLake datasource. */ + OneLake = "onelake", +} + +/** + * Defines the type of a datasource. \ + * {@link KnownSearchIndexerDataSourceType} can be used interchangeably with SearchIndexerDataSourceType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **azuresql**: Indicates an Azure SQL datasource. \ + * **cosmosdb**: Indicates a CosmosDB datasource. \ + * **azureblob**: Indicates an Azure Blob datasource. \ + * **azuretable**: Indicates an Azure Table datasource. \ + * **mysql**: Indicates a MySql datasource. \ + * **adlsgen2**: Indicates an ADLS Gen2 datasource. \ + * **onelake**: Indicates a Microsoft Fabric OneLake datasource. + */ +export type SearchIndexerDataSourceType = string; + +/** Represents credentials that can be used to connect to a datasource. */ +export interface DataSourceCredentials { + /** The connection string for the datasource. Set to `` (with brackets) if you don't want the connection string updated. Set to `` if you want to remove the connection string value from the datasource. 
+
+/** Represents credentials that can be used to connect to a datasource. */
+export interface DataSourceCredentials {
+  /** The connection string for the datasource. Set to `<unchanged>` (with brackets) if you don't want the connection string updated. Set to `<redacted>` if you want to remove the connection string value from the datasource. */
+  connectionString?: string;
+}
+
+export function dataSourceCredentialsSerializer(item: DataSourceCredentials): any {
+  return { connectionString: item["connectionString"] };
+}
+
+export function dataSourceCredentialsDeserializer(item: any): DataSourceCredentials {
+  return {
+    connectionString: item["connectionString"],
+  };
+}
+
+/** Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. */
+export interface SearchIndexerDataContainer {
+  /** The name of the table or view (for Azure SQL data source) or collection (for CosmosDB data source) that will be indexed. */
+  name: string;
+  /** A query that is applied to this data container. The syntax and meaning of this parameter is datasource-specific. Not supported by Azure SQL datasources. */
+  query?: string;
+}
+
+export function searchIndexerDataContainerSerializer(item: SearchIndexerDataContainer): any {
+  return { name: item["name"], query: item["query"] };
+}
+
+export function searchIndexerDataContainerDeserializer(item: any): SearchIndexerDataContainer {
+  return {
+    name: item["name"],
+    query: item["query"],
+  };
+}
+
+/** Options with various types of permission data to index. */
+export enum KnownIndexerPermissionOption {
+  /** Indexer to ingest ACL userIds from data source to index. */
+  UserIds = "userIds",
+  /** Indexer to ingest ACL groupIds from data source to index. */
+  GroupIds = "groupIds",
+  /** Indexer to ingest Azure RBAC scope from data source to index. */
+  RbacScope = "rbacScope",
+}
+
+/**
+ * Options with various types of permission data to index. \
+ * {@link KnownIndexerPermissionOption} can be used interchangeably with IndexerPermissionOption,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **userIds**: Indexer to ingest ACL userIds from data source to index. \
+ * **groupIds**: Indexer to ingest ACL groupIds from data source to index. \
+ * **rbacScope**: Indexer to ingest Azure RBAC scope from data source to index.
+ */
+export type IndexerPermissionOption = string;
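+
+// Editor's sketch (not generated code): on updates, the service treats the
+// literal "<unchanged>" placeholder as "keep the stored connection string",
+// per the DataSourceCredentials doc above.
+export function exampleKeepExistingCredentials(): any {
+  return dataSourceCredentialsSerializer({ connectionString: "<unchanged>" });
+}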
+
+/** Base type for data change detection policies. */
+export interface DataChangeDetectionPolicy {
+  /**
+   * The discriminator for derived types.
+   * The discriminator possible values: #Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy, #Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy
+   */
+  odatatype: string;
+}
+
+export function dataChangeDetectionPolicySerializer(item: DataChangeDetectionPolicy): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function dataChangeDetectionPolicyDeserializer(item: any): DataChangeDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
+
+/** Alias for DataChangeDetectionPolicyUnion */
+export type DataChangeDetectionPolicyUnion =
+  | HighWaterMarkChangeDetectionPolicy
+  | SqlIntegratedChangeTrackingPolicy
+  | DataChangeDetectionPolicy;
+
+export function dataChangeDetectionPolicyUnionSerializer(
+  item: DataChangeDetectionPolicyUnion,
+): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy":
+      return highWaterMarkChangeDetectionPolicySerializer(
+        item as HighWaterMarkChangeDetectionPolicy,
+      );
+
+    case "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy":
+      return sqlIntegratedChangeTrackingPolicySerializer(item as SqlIntegratedChangeTrackingPolicy);
+
+    default:
+      return dataChangeDetectionPolicySerializer(item);
+  }
+}
+
+export function dataChangeDetectionPolicyUnionDeserializer(
+  item: any,
+): DataChangeDetectionPolicyUnion {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy":
+      return highWaterMarkChangeDetectionPolicyDeserializer(
+        item as HighWaterMarkChangeDetectionPolicy,
+      );
+
+    case "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy":
+      return sqlIntegratedChangeTrackingPolicyDeserializer(
+        item as SqlIntegratedChangeTrackingPolicy,
+      );
+
+    default:
+      return dataChangeDetectionPolicyDeserializer(item);
+  }
+}
+
+/** Defines a data change detection policy that captures changes based on the value of a high water mark column. */
+export interface HighWaterMarkChangeDetectionPolicy extends DataChangeDetectionPolicy {
+  /** The name of the high water mark column. */
+  highWaterMarkColumnName: string;
+  /** A URI fragment specifying the type of data change detection policy. */
+  odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy";
+}
+
+export function highWaterMarkChangeDetectionPolicySerializer(
+  item: HighWaterMarkChangeDetectionPolicy,
+): any {
+  return {
+    "@odata.type": item["odatatype"],
+    highWaterMarkColumnName: item["highWaterMarkColumnName"],
+  };
+}
+
+export function highWaterMarkChangeDetectionPolicyDeserializer(
+  item: any,
+): HighWaterMarkChangeDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+    highWaterMarkColumnName: item["highWaterMarkColumnName"],
+  };
+}
+
+/** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */
+export interface SqlIntegratedChangeTrackingPolicy extends DataChangeDetectionPolicy {
+  /** A URI fragment specifying the type of data change detection policy. */
+  odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy";
+}
+
+export function sqlIntegratedChangeTrackingPolicySerializer(
+  item: SqlIntegratedChangeTrackingPolicy,
+): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function sqlIntegratedChangeTrackingPolicyDeserializer(
+  item: any,
+): SqlIntegratedChangeTrackingPolicy {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
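+
+// Editor's sketch (not generated code): the `odatatype` discriminator picks
+// the serializer branch; unknown values fall through to the base serializer,
+// which emits only "@odata.type". The "_ts" column name is hypothetical.
+export function exampleSerializeChangeDetectionPolicy(): any {
+  return dataChangeDetectionPolicyUnionSerializer({
+    odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
+    highWaterMarkColumnName: "_ts",
+  });
+}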
+
+/** Base type for data deletion detection policies. */
+export interface DataDeletionDetectionPolicy {
+  /**
+   * The discriminator for derived types.
+   * The discriminator possible values: #Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy, #Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy
+   */
+  odatatype: string;
+}
+
+export function dataDeletionDetectionPolicySerializer(item: DataDeletionDetectionPolicy): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function dataDeletionDetectionPolicyDeserializer(item: any): DataDeletionDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
+
+/** Alias for DataDeletionDetectionPolicyUnion */
+export type DataDeletionDetectionPolicyUnion =
+  | SoftDeleteColumnDeletionDetectionPolicy
+  | NativeBlobSoftDeleteDeletionDetectionPolicy
+  | DataDeletionDetectionPolicy;
+
+export function dataDeletionDetectionPolicyUnionSerializer(
+  item: DataDeletionDetectionPolicyUnion,
+): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy":
+      return softDeleteColumnDeletionDetectionPolicySerializer(
+        item as SoftDeleteColumnDeletionDetectionPolicy,
+      );
+
+    case "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy":
+      return nativeBlobSoftDeleteDeletionDetectionPolicySerializer(
+        item as NativeBlobSoftDeleteDeletionDetectionPolicy,
+      );
+
+    default:
+      return dataDeletionDetectionPolicySerializer(item);
+  }
+}
+
+export function dataDeletionDetectionPolicyUnionDeserializer(
+  item: any,
+): DataDeletionDetectionPolicyUnion {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy":
+      return softDeleteColumnDeletionDetectionPolicyDeserializer(
+        item as SoftDeleteColumnDeletionDetectionPolicy,
+      );
+
+    case "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy":
+      return nativeBlobSoftDeleteDeletionDetectionPolicyDeserializer(
+        item as NativeBlobSoftDeleteDeletionDetectionPolicy,
+      );
+
+    default:
+      return dataDeletionDetectionPolicyDeserializer(item);
+  }
+}
+
+/** Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. */
+export interface SoftDeleteColumnDeletionDetectionPolicy extends DataDeletionDetectionPolicy {
+  /** The name of the column to use for soft-deletion detection. */
+  softDeleteColumnName?: string;
+  /** The marker value that identifies an item as deleted. */
+  softDeleteMarkerValue?: string;
+  /** A URI fragment specifying the type of data deletion detection policy. */
+  odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
+}
+
+export function softDeleteColumnDeletionDetectionPolicySerializer(
+  item: SoftDeleteColumnDeletionDetectionPolicy,
+): any {
+  return {
+    "@odata.type": item["odatatype"],
+    softDeleteColumnName: item["softDeleteColumnName"],
+    softDeleteMarkerValue: item["softDeleteMarkerValue"],
+  };
+}
+
+export function softDeleteColumnDeletionDetectionPolicyDeserializer(
+  item: any,
+): SoftDeleteColumnDeletionDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+    softDeleteColumnName: item["softDeleteColumnName"],
+    softDeleteMarkerValue: item["softDeleteMarkerValue"],
+  };
+}
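+
+// Editor's sketch (not generated code): a soft-delete policy that treats rows
+// whose isDeleted column equals "true" as deletions. The column name and
+// marker value are hypothetical.
+export function exampleSoftDeletePolicy(): any {
+  return softDeleteColumnDeletionDetectionPolicySerializer({
+    odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
+    softDeleteColumnName: "isDeleted",
+    softDeleteMarkerValue: "true",
+  });
+}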
+
+/** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */
+export interface NativeBlobSoftDeleteDeletionDetectionPolicy extends DataDeletionDetectionPolicy {
+  /** A URI fragment specifying the type of data deletion detection policy. */
+  odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
+}
+
+export function nativeBlobSoftDeleteDeletionDetectionPolicySerializer(
+  item: NativeBlobSoftDeleteDeletionDetectionPolicy,
+): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function nativeBlobSoftDeleteDeletionDetectionPolicyDeserializer(
+  item: any,
+): NativeBlobSoftDeleteDeletionDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
+
+/** Response from a List Datasources request. If successful, it includes the full definitions of all datasources. */
+export interface ListDataSourcesResult {
+  /** The datasources in the Search service. */
+  dataSources: SearchIndexerDataSourceConnection[];
+}
+
+export function listDataSourcesResultDeserializer(item: any): ListDataSourcesResult {
+  return {
+    dataSources: searchIndexerDataSourceConnectionArrayDeserializer(item["value"]),
+  };
+}
+
+export function searchIndexerDataSourceConnectionArraySerializer(
+  result: Array<SearchIndexerDataSourceConnection>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerDataSourceConnectionSerializer(item);
+  });
+}
+
+export function searchIndexerDataSourceConnectionArrayDeserializer(
+  result: Array<SearchIndexerDataSourceConnection>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerDataSourceConnectionDeserializer(item);
+  });
+}
+
+/** Document keys or datasource document identifiers to be reset. */
+export interface DocumentKeysOrIds {
+  /** Document keys to be reset. */
+  documentKeys?: string[];
+  /** Datasource document identifiers to be reset. */
+  datasourceDocumentIds?: string[];
+}
+
+export function documentKeysOrIdsSerializer(item: DocumentKeysOrIds): any {
+  return {
+    documentKeys: !item["documentKeys"]
+      ? item["documentKeys"]
+      : item["documentKeys"].map((p: any) => {
+          return p;
+        }),
+    datasourceDocumentIds: !item["datasourceDocumentIds"]
+      ? item["datasourceDocumentIds"]
+      : item["datasourceDocumentIds"].map((p: any) => {
+          return p;
+        }),
+  };
+}
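+
+// Editor's sketch (not generated code): a resetdocs-style payload asking the
+// indexer to reprocess two specific documents. The keys are hypothetical.
+export function exampleResetDocumentKeys(): any {
+  return documentKeysOrIdsSerializer({ documentKeys: ["hotel-1", "hotel-2"] });
+}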
+
+/** Represents an indexer. */
+export interface SearchIndexer {
+  /** The name of the indexer. */
+  name: string;
+  /** The description of the indexer. */
+  description?: string;
+  /** The name of the datasource from which this indexer reads data. */
+  dataSourceName: string;
+  /** The name of the skillset executing with this indexer. */
+  skillsetName?: string;
+  /** The name of the index to which this indexer writes data. */
+  targetIndexName: string;
+  /** The schedule for this indexer. */
+  schedule?: IndexingSchedule;
+  /** Parameters for indexer execution. */
+  parameters?: IndexingParameters;
+  /** Defines mappings between fields in the data source and corresponding target fields in the index. */
+  fieldMappings?: FieldMapping[];
+  /** Output field mappings are applied after enrichment and immediately before indexing. */
+  outputFieldMappings?: FieldMapping[];
+  /** A value indicating whether the indexer is disabled. Default is false. */
+  isDisabled?: boolean;
+  /** The ETag of the indexer. */
+  eTag?: string;
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your indexer definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+  /** Adds caching to an enrichment pipeline to allow for incremental modification steps without having to rebuild the index every time. */
+  cache?: SearchIndexerCache;
+}
+
+export function searchIndexerSerializer(item: SearchIndexer): any {
+  return {
+    name: item["name"],
+    description: item["description"],
+    dataSourceName: item["dataSourceName"],
+    skillsetName: item["skillsetName"],
+    targetIndexName: item["targetIndexName"],
+    schedule: !item["schedule"] ? item["schedule"] : indexingScheduleSerializer(item["schedule"]),
+    parameters: !item["parameters"]
+      ? item["parameters"]
+      : indexingParametersSerializer(item["parameters"]),
+    fieldMappings: !item["fieldMappings"]
+      ? item["fieldMappings"]
+      : fieldMappingArraySerializer(item["fieldMappings"]),
+    outputFieldMappings: !item["outputFieldMappings"]
+      ? item["outputFieldMappings"]
+      : fieldMappingArraySerializer(item["outputFieldMappings"]),
+    disabled: item["isDisabled"],
+    "@odata.etag": item["eTag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeySerializer(item["encryptionKey"]),
+    cache: !item["cache"] ? item["cache"] : searchIndexerCacheSerializer(item["cache"]),
+  };
+}
+
+export function searchIndexerDeserializer(item: any): SearchIndexer {
+  return {
+    name: item["name"],
+    description: item["description"],
+    dataSourceName: item["dataSourceName"],
+    skillsetName: item["skillsetName"],
+    targetIndexName: item["targetIndexName"],
+    schedule: !item["schedule"] ? item["schedule"] : indexingScheduleDeserializer(item["schedule"]),
+    parameters: !item["parameters"]
+      ? item["parameters"]
+      : indexingParametersDeserializer(item["parameters"]),
+    fieldMappings: !item["fieldMappings"]
+      ? item["fieldMappings"]
+      : fieldMappingArrayDeserializer(item["fieldMappings"]),
+    outputFieldMappings: !item["outputFieldMappings"]
+      ? item["outputFieldMappings"]
+      : fieldMappingArrayDeserializer(item["outputFieldMappings"]),
+    isDisabled: item["disabled"],
+    eTag: item["@odata.etag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]),
+    cache: !item["cache"] ? item["cache"] : searchIndexerCacheDeserializer(item["cache"]),
+  };
+}
+
+/** Represents parameters for indexer execution. */
+export interface IndexingParameters {
+  /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */
+  batchSize?: number;
+  /** The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. */
+  maxFailedItems?: number;
+  /** The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. */
+  maxFailedItemsPerBatch?: number;
+  /** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
+  configuration?: IndexingParametersConfiguration;
+}
+
+export function indexingParametersSerializer(item: IndexingParameters): any {
+  return {
+    batchSize: item["batchSize"],
+    maxFailedItems: item["maxFailedItems"],
+    maxFailedItemsPerBatch: item["maxFailedItemsPerBatch"],
+    configuration: !item["configuration"]
+      ? item["configuration"]
+      : indexingParametersConfigurationSerializer(item["configuration"]),
+  };
+}
+
+export function indexingParametersDeserializer(item: any): IndexingParameters {
+  return {
+    batchSize: item["batchSize"],
+    maxFailedItems: item["maxFailedItems"],
+    maxFailedItemsPerBatch: item["maxFailedItemsPerBatch"],
+    configuration: !item["configuration"]
+      ? item["configuration"]
+      : indexingParametersConfigurationDeserializer(item["configuration"]),
+  };
+}
+
+/** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
+export interface IndexingParametersConfiguration {
+  /** Represents the parsing mode for indexing from an Azure blob data source. */
+  parsingMode?: BlobIndexerParsingMode;
+  /** Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over those files during indexing. */
+  excludedFileNameExtensions?: string;
+  /** Comma-delimited list of filename extensions to select when processing from Azure blob storage. For example, you could focus indexing on specific application files ".docx, .pptx, .msg" to specifically include those file types. */
+  indexedFileNameExtensions?: string;
+  /** For Azure blobs, set to false if you want to continue indexing when an unsupported content type is encountered, and you don't know all the content types (file extensions) in advance. */
+  failOnUnsupportedContentType?: boolean;
+  /** For Azure blobs, set to false if you want to continue indexing if a document fails indexing. */
+  failOnUnprocessableDocument?: boolean;
+  /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. */
+  indexStorageMetadataOnlyForOversizedDocuments?: boolean;
+  /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */
+  delimitedTextHeaders?: string;
+  /** For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, "|"). */
+  delimitedTextDelimiter?: string;
+  /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */
+  firstLineContainsHeaders?: boolean;
+  /** Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. */
+  markdownParsingSubmode?: MarkdownParsingSubmode;
+  /** Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. */
+  markdownHeaderDepth?: MarkdownHeaderDepth;
+  /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */
+  documentRoot?: string;
+  /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */
+  dataToExtract?: BlobIndexerDataToExtract;
+  /** Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. */
+  imageAction?: BlobIndexerImageAction;
+  /** If true, will create a path //document//file_data that is an object representing the original file data downloaded from your blob data source. This allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. */
+  allowSkillsetToReadFileData?: boolean;
+  /** Determines the algorithm for text extraction from PDF files in Azure blob storage. */
+  pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm;
+  /** Specifies the environment in which the indexer should execute. */
+  executionEnvironment?: IndexerExecutionEnvironment;
+  /** Increases the timeout beyond the 5-minute default for Azure SQL database data sources, specified in the format "hh:mm:ss". */
+  queryTimeout?: string;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function indexingParametersConfigurationSerializer(
+  item: IndexingParametersConfiguration,
+): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    parsingMode: item["parsingMode"],
+    excludedFileNameExtensions: item["excludedFileNameExtensions"],
+    indexedFileNameExtensions: item["indexedFileNameExtensions"],
+    failOnUnsupportedContentType: item["failOnUnsupportedContentType"],
+    failOnUnprocessableDocument: item["failOnUnprocessableDocument"],
+    indexStorageMetadataOnlyForOversizedDocuments:
+      item["indexStorageMetadataOnlyForOversizedDocuments"],
+    delimitedTextHeaders: item["delimitedTextHeaders"],
+    delimitedTextDelimiter: item["delimitedTextDelimiter"],
+    firstLineContainsHeaders: item["firstLineContainsHeaders"],
+    markdownParsingSubmode: item["markdownParsingSubmode"],
+    markdownHeaderDepth: item["markdownHeaderDepth"],
+    documentRoot: item["documentRoot"],
+    dataToExtract: item["dataToExtract"],
+    imageAction: item["imageAction"],
+    allowSkillsetToReadFileData: item["allowSkillsetToReadFileData"],
+    pdfTextRotationAlgorithm: item["pdfTextRotationAlgorithm"],
+    executionEnvironment: item["executionEnvironment"],
+    queryTimeout: item["queryTimeout"],
+  };
+}
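+
+// Editor's sketch (not generated code): known keys serialize as-is, while
+// entries in additionalProperties are spread onto the top-level wire object
+// (the spread comes first, so explicitly listed keys win on collisions).
+// "customSetting" is a hypothetical key.
+export function exampleSerializeIndexingConfiguration(): any {
+  return indexingParametersConfigurationSerializer({
+    parsingMode: "delimitedText",
+    firstLineContainsHeaders: true,
+    additionalProperties: { customSetting: "value" },
+  });
+}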
+
+export function indexingParametersConfigurationDeserializer(
+  item: any,
+): IndexingParametersConfiguration {
+  return {
+    additionalProperties: serializeRecord(item, [
+      "parsingMode",
+      "excludedFileNameExtensions",
+      "indexedFileNameExtensions",
+      "failOnUnsupportedContentType",
+      "failOnUnprocessableDocument",
+      "indexStorageMetadataOnlyForOversizedDocuments",
+      "delimitedTextHeaders",
+      "delimitedTextDelimiter",
+      "firstLineContainsHeaders",
+      "markdownParsingSubmode",
+      "markdownHeaderDepth",
+      "documentRoot",
+      "dataToExtract",
+      "imageAction",
+      "allowSkillsetToReadFileData",
+      "pdfTextRotationAlgorithm",
+      "executionEnvironment",
+      "queryTimeout",
+    ]),
+    parsingMode: item["parsingMode"],
+    excludedFileNameExtensions: item["excludedFileNameExtensions"],
+    indexedFileNameExtensions: item["indexedFileNameExtensions"],
+    failOnUnsupportedContentType: item["failOnUnsupportedContentType"],
+    failOnUnprocessableDocument: item["failOnUnprocessableDocument"],
+    indexStorageMetadataOnlyForOversizedDocuments:
+      item["indexStorageMetadataOnlyForOversizedDocuments"],
+    delimitedTextHeaders: item["delimitedTextHeaders"],
+    delimitedTextDelimiter: item["delimitedTextDelimiter"],
+    firstLineContainsHeaders: item["firstLineContainsHeaders"],
+    markdownParsingSubmode: item["markdownParsingSubmode"],
+    markdownHeaderDepth: item["markdownHeaderDepth"],
+    documentRoot: item["documentRoot"],
+    dataToExtract: item["dataToExtract"],
+    imageAction: item["imageAction"],
+    allowSkillsetToReadFileData: item["allowSkillsetToReadFileData"],
+    pdfTextRotationAlgorithm: item["pdfTextRotationAlgorithm"],
+    executionEnvironment: item["executionEnvironment"],
+    queryTimeout: item["queryTimeout"],
+  };
+}
+
+/** Specifies the environment in which the indexer should execute. */
+export enum KnownIndexerExecutionEnvironment {
+  /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */
+  Standard = "standard",
+  /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
+  Private = "private",
+}
+
+/**
+ * Specifies the environment in which the indexer should execute. \
+ * {@link KnownIndexerExecutionEnvironment} can be used interchangeably with IndexerExecutionEnvironment,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **standard**: Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. \
+ * **private**: Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources.
+ */
+export type IndexerExecutionEnvironment = string;
+
+export function fieldMappingArraySerializer(result: Array<FieldMapping>): any[] {
+  return result.map((item) => {
+    return fieldMappingSerializer(item);
+  });
+}
+
+export function fieldMappingArrayDeserializer(result: Array<FieldMapping>): any[] {
+  return result.map((item) => {
+    return fieldMappingDeserializer(item);
+  });
+}
+
+/** Defines a mapping between a field in a data source and a target field in an index. */
+export interface FieldMapping {
+  /** The name of the field in the data source. */
+  sourceFieldName: string;
+  /** The name of the target field in the index. Same as the source field name by default. */
+  targetFieldName?: string;
+  /** A function to apply to each source field value before indexing. */
+  mappingFunction?: FieldMappingFunction;
+}
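+
+// Editor's sketch (not generated code): maps a source URL column onto the
+// index key, base64-encoding it so it is key-safe; "base64Encode" is one of
+// the service's documented mapping-function names. Field names are hypothetical.
+export function exampleKeyFieldMapping(): FieldMapping {
+  return {
+    sourceFieldName: "url",
+    targetFieldName: "id",
+    mappingFunction: { name: "base64Encode" },
+  };
+}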
item["mappingFunction"] + : fieldMappingFunctionSerializer(item["mappingFunction"]), + }; +} + +export function fieldMappingDeserializer(item: any): FieldMapping { + return { + sourceFieldName: item["sourceFieldName"], + targetFieldName: item["targetFieldName"], + mappingFunction: !item["mappingFunction"] + ? item["mappingFunction"] + : fieldMappingFunctionDeserializer(item["mappingFunction"]), + }; +} + +/** Represents a function that transforms a value from a data source before indexing. */ +export interface FieldMappingFunction { + /** The name of the field mapping function. */ + name: string; + /** A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. */ + parameters?: Record; +} + +export function fieldMappingFunctionSerializer(item: FieldMappingFunction): any { + return { name: item["name"], parameters: item["parameters"] }; +} + +export function fieldMappingFunctionDeserializer(item: any): FieldMappingFunction { + return { + name: item["name"], + parameters: item["parameters"], + }; +} + +/** The type of the cache. */ +export interface SearchIndexerCache { + /** The connection string to the storage account where the cache data will be persisted. */ + storageConnectionString?: string; + /** Specifies whether incremental reprocessing is enabled. */ + enableReprocessing?: boolean; + /** The user-assigned managed identity used for connections to the enrichment cache. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + identity?: SearchIndexerDataIdentityUnion; + /** A guid for the SearchIndexerCache. */ + id?: string; +} + +export function searchIndexerCacheSerializer(item: SearchIndexerCache): any { + return { + storageConnectionString: item["storageConnectionString"], + enableReprocessing: item["enableReprocessing"], + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + id: item["id"], + }; +} + +export function searchIndexerCacheDeserializer(item: any): SearchIndexerCache { + return { + storageConnectionString: item["storageConnectionString"], + enableReprocessing: item["enableReprocessing"], + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + id: item["id"], + }; +} + +/** Response from a List Indexers request. If successful, it includes the full definitions of all indexers. */ +export interface ListIndexersResult { + /** The indexers in the Search service. */ + indexers: SearchIndexer[]; +} + +export function listIndexersResultDeserializer(item: any): ListIndexersResult { + return { + indexers: searchIndexerArrayDeserializer(item["value"]), + }; +} + +export function searchIndexerArraySerializer(result: Array): any[] { + return result.map((item) => { + return searchIndexerSerializer(item); + }); +} + +export function searchIndexerArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return searchIndexerDeserializer(item); + }); +} + +/** Represents the current status and execution history of an indexer. */ +export interface SearchIndexerStatus { + /** The name of the indexer. */ + name: string; + /** Overall indexer status. */ + status: IndexerStatus; + /** The indexer's cumulative runtime consumption in the service. 
+  runtime?: IndexerRuntime;
+  /** The result of the most recent or an in-progress indexer execution. */
+  lastResult?: IndexerExecutionResult;
+  /** History of the recent indexer executions, sorted in reverse chronological order. */
+  executionHistory: IndexerExecutionResult[];
+  /** The execution limits for the indexer. */
+  limits: SearchIndexerLimits;
+  /** All of the state that defines and dictates the indexer's current execution. */
+  currentState?: IndexerCurrentState;
+}
+
+export function searchIndexerStatusDeserializer(item: any): SearchIndexerStatus {
+  return {
+    name: item["name"],
+    status: item["status"],
+    runtime: !item["runtime"] ? item["runtime"] : indexerRuntimeDeserializer(item["runtime"]),
+    lastResult: !item["lastResult"]
+      ? item["lastResult"]
+      : indexerExecutionResultDeserializer(item["lastResult"]),
+    executionHistory: indexerExecutionResultArrayDeserializer(item["executionHistory"]),
+    limits: searchIndexerLimitsDeserializer(item["limits"]),
+    currentState: !item["currentState"]
+      ? item["currentState"]
+      : indexerCurrentStateDeserializer(item["currentState"]),
+  };
+}
+
+/** Represents the overall indexer status. */
+export enum KnownIndexerStatus {
+  /** Indicates that the indexer is in an unknown state. */
+  Unknown = "unknown",
+  /** Indicates that the indexer experienced an error that cannot be corrected without human intervention. */
+  Error = "error",
+  /** Indicates that the indexer is running normally. */
+  Running = "running",
+}
+
+/**
+ * Represents the overall indexer status. \
+ * {@link KnownIndexerStatus} can be used interchangeably with IndexerStatus,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **unknown**: Indicates that the indexer is in an unknown state. \
+ * **error**: Indicates that the indexer experienced an error that cannot be corrected without human intervention. \
+ * **running**: Indicates that the indexer is running normally.
+ */
+export type IndexerStatus = string;
+
+/** Represents the result of an individual indexer execution. */
+export interface IndexerExecutionResult {
+  /** The outcome of this indexer execution. */
+  status: IndexerExecutionStatus;
+  /** Additional detail about the outcome of this indexer execution, if available. */
+  readonly statusDetail?: IndexerExecutionStatusDetail;
+  /** The mode the indexer is running in. */
+  readonly mode?: IndexingMode;
+  /** All of the state that defines and dictates the indexer's current execution. */
+  readonly currentState?: IndexerCurrentState;
+  /** The error message indicating the top-level error, if any. */
+  errorMessage?: string;
+  /** The start time of this indexer execution. */
+  startTime?: Date;
+  /** The end time of this indexer execution, if the execution has already completed. */
+  endTime?: Date;
+  /** The item-level indexing errors. */
+  errors: SearchIndexerError[];
+  /** The item-level indexing warnings. */
+  warnings: SearchIndexerWarning[];
+  /** The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed. */
+  itemCount: number;
+  /** The number of items that failed to be indexed during this indexer execution. */
+  failedItemCount: number;
+  /** Change tracking state with which an indexer execution started. */
+  initialTrackingState?: string;
+  /** Change tracking state with which an indexer execution finished.
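+   * Tracking state is an opaque, data-source-specific high-water mark (for example, a
+   * change detection timestamp or continuation token) and is not meant to be parsed.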
+   */
+  finalTrackingState?: string;
+}
+
+export function indexerExecutionResultDeserializer(item: any): IndexerExecutionResult {
+  return {
+    status: item["status"],
+    statusDetail: item["statusDetail"],
+    mode: item["mode"],
+    currentState: !item["currentState"]
+      ? item["currentState"]
+      : indexerCurrentStateDeserializer(item["currentState"]),
+    errorMessage: item["errorMessage"],
+    startTime: !item["startTime"] ? item["startTime"] : new Date(item["startTime"]),
+    endTime: !item["endTime"] ? item["endTime"] : new Date(item["endTime"]),
+    errors: searchIndexerErrorArrayDeserializer(item["errors"]),
+    warnings: searchIndexerWarningArrayDeserializer(item["warnings"]),
+    itemCount: item["itemsProcessed"],
+    failedItemCount: item["itemsFailed"],
+    initialTrackingState: item["initialTrackingState"],
+    finalTrackingState: item["finalTrackingState"],
+  };
+}
+
+/** Represents the status of an individual indexer execution. */
+export enum KnownIndexerExecutionStatus {
+  /** An indexer invocation has failed, but the failure may be transient. Indexer invocations will continue per schedule. */
+  TransientFailure = "transientFailure",
+  /** Indexer execution completed successfully. */
+  Success = "success",
+  /** Indexer execution is in progress. */
+  InProgress = "inProgress",
+  /** Indexer has been reset. */
+  Reset = "reset",
+}
+
+/**
+ * Represents the status of an individual indexer execution. \
+ * {@link KnownIndexerExecutionStatus} can be used interchangeably with IndexerExecutionStatus,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **transientFailure**: An indexer invocation has failed, but the failure may be transient. Indexer invocations will continue per schedule. \
+ * **success**: Indexer execution completed successfully. \
+ * **inProgress**: Indexer execution is in progress. \
+ * **reset**: Indexer has been reset.
+ */
+export type IndexerExecutionStatus = string;
+
+/** Details the status of an individual indexer execution. */
+export enum KnownIndexerExecutionStatusDetail {
+  /** Indicates that the reset that occurred was for a call to ResetDocs. */
+  ResetDocs = "resetDocs",
+  /** Indicates a selective resync based on options from the data source. */
+  Resync = "resync",
+}
+
+/**
+ * Details the status of an individual indexer execution. \
+ * {@link KnownIndexerExecutionStatusDetail} can be used interchangeably with IndexerExecutionStatusDetail,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **resetDocs**: Indicates that the reset that occurred was for a call to ResetDocs. \
+ * **resync**: Indicates a selective resync based on options from the data source.
+ */
+export type IndexerExecutionStatusDetail = string;
+
+/** Represents the mode the indexer is executing in. */
+export enum KnownIndexingMode {
+  /** The indexer is indexing all documents in the datasource. */
+  IndexingAllDocs = "indexingAllDocs",
+  /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */
+  IndexingResetDocs = "indexingResetDocs",
+  /** The indexer is resyncing, selectively indexing based on options from the data source. */
+  IndexingResync = "indexingResync",
+}
+
+/**
+ * Represents the mode the indexer is executing in. \
+ * {@link KnownIndexingMode} can be used interchangeably with IndexingMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **indexingAllDocs**: The indexer is indexing all documents in the datasource. \
+ * **indexingResetDocs**: The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. \
+ * **indexingResync**: The indexer is resyncing, selectively indexing based on options from the data source.
+ */
+export type IndexingMode = string;
+
+/** Represents all of the state that defines and dictates the indexer's current execution. */
+export interface IndexerCurrentState {
+  /** The mode the indexer is running in. */
+  readonly mode?: IndexingMode;
+  /** Change tracking state used when indexing starts on all documents in the datasource. */
+  readonly allDocsInitialTrackingState?: string;
+  /** Change tracking state value when indexing finishes on all documents in the datasource. */
+  readonly allDocsFinalTrackingState?: string;
+  /** Change tracking state used when indexing starts on select, reset documents in the datasource. */
+  readonly resetDocsInitialTrackingState?: string;
+  /** Change tracking state value when indexing finishes on select, reset documents in the datasource. */
+  readonly resetDocsFinalTrackingState?: string;
+  /** Change tracking state used when indexing starts on selective options from the datasource. */
+  readonly resyncInitialTrackingState?: string;
+  /** Change tracking state value when indexing finishes on selective options from the datasource. */
+  readonly resyncFinalTrackingState?: string;
+  /** The list of document keys that have been reset. The document key is the document's unique identifier for the data in the search index. The indexer will prioritize selectively re-ingesting these keys. */
+  readonly resetDocumentKeys?: string[];
+  /** The list of datasource document ids that have been reset. The datasource document id is the unique identifier for the data in the datasource. The indexer will prioritize selectively re-ingesting these ids. */
+  readonly resetDatasourceDocumentIds?: string[];
+}
+
+export function indexerCurrentStateDeserializer(item: any): IndexerCurrentState {
+  return {
+    mode: item["mode"],
+    allDocsInitialTrackingState: item["allDocsInitialTrackingState"],
+    allDocsFinalTrackingState: item["allDocsFinalTrackingState"],
+    resetDocsInitialTrackingState: item["resetDocsInitialTrackingState"],
+    resetDocsFinalTrackingState: item["resetDocsFinalTrackingState"],
+    resyncInitialTrackingState: item["resyncInitialTrackingState"],
+    resyncFinalTrackingState: item["resyncFinalTrackingState"],
+    resetDocumentKeys: !item["resetDocumentKeys"]
+      ? item["resetDocumentKeys"]
+      : item["resetDocumentKeys"].map((p: any) => {
+          return p;
+        }),
+    resetDatasourceDocumentIds: !item["resetDatasourceDocumentIds"]
+      ? item["resetDatasourceDocumentIds"]
+      : item["resetDatasourceDocumentIds"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function searchIndexerErrorArrayDeserializer(result: Array<SearchIndexerError>): any[] {
+  return result.map((item) => {
+    return searchIndexerErrorDeserializer(item);
+  });
+}
+
+/** Represents an item- or document-level indexing error. */
+export interface SearchIndexerError {
+  /** The key of the item for which indexing failed. */
+  key?: string;
+  /** The message describing the error that occurred while processing the item. */
+  errorMessage: string;
+  /** The status code indicating why the indexing operation failed.
+   * Possible values include: 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy. */
+  statusCode: number;
+  /** The name of the source at which the error originated. For example, this could refer to a particular skill in the attached skillset. This may not always be available. */
+  name?: string;
+  /** Additional, verbose details about the error to assist in debugging the indexer. This may not always be available. */
+  details?: string;
+  /** A link to a troubleshooting guide for these classes of errors. This may not always be available. */
+  documentationLink?: string;
+}
+
+export function searchIndexerErrorDeserializer(item: any): SearchIndexerError {
+  return {
+    key: item["key"],
+    errorMessage: item["errorMessage"],
+    statusCode: item["statusCode"],
+    name: item["name"],
+    details: item["details"],
+    documentationLink: item["documentationLink"],
+  };
+}
+
+export function searchIndexerWarningArrayDeserializer(result: Array<SearchIndexerWarning>): any[] {
+  return result.map((item) => {
+    return searchIndexerWarningDeserializer(item);
+  });
+}
+
+/** Represents an item-level warning. */
+export interface SearchIndexerWarning {
+  /** The key of the item which generated a warning. */
+  key?: string;
+  /** The message describing the warning that occurred while processing the item. */
+  message: string;
+  /** The name of the source at which the warning originated. For example, this could refer to a particular skill in the attached skillset. This may not always be available. */
+  name?: string;
+  /** Additional, verbose details about the warning to assist in debugging the indexer. This may not always be available. */
+  details?: string;
+  /** A link to a troubleshooting guide for these classes of warnings. This may not always be available. */
+  documentationLink?: string;
+}
+
+export function searchIndexerWarningDeserializer(item: any): SearchIndexerWarning {
+  return {
+    key: item["key"],
+    message: item["message"],
+    name: item["name"],
+    details: item["details"],
+    documentationLink: item["documentationLink"],
+  };
+}
+
+export function indexerExecutionResultArrayDeserializer(
+  result: Array<IndexerExecutionResult>,
+): any[] {
+  return result.map((item) => {
+    return indexerExecutionResultDeserializer(item);
+  });
+}
+
+/** Represents the limits that can be applied to an indexer. */
+export interface SearchIndexerLimits {
+  /** The maximum duration that the indexer is permitted to run for one execution. */
+  maxRunTime?: string;
+  /** The maximum size of a document, in bytes, which will be considered valid for indexing. */
+  maxDocumentExtractionSize?: number;
+  /** The maximum number of characters that will be extracted from a document picked up for indexing. */
+  maxDocumentContentCharactersToExtract?: number;
+}
+
+export function searchIndexerLimitsDeserializer(item: any): SearchIndexerLimits {
+  return {
+    maxRunTime: item["maxRunTime"],
+    maxDocumentExtractionSize: item["maxDocumentExtractionSize"],
+    maxDocumentContentCharactersToExtract: item["maxDocumentContentCharactersToExtract"],
+  };
+}
+
+/** A list of skills. */
+export interface SearchIndexerSkillset {
+  /** The name of the skillset. */
+  name: string;
+  /** The description of the skillset. */
+  description?: string;
+  /** A list of skills in the skillset. */
+  skills: SearchIndexerSkillUnion[];
+  /** Details about the Azure AI service to be used when running skills. */
+  cognitiveServicesAccount?: CognitiveServicesAccountUnion;
+  /** Definition of additional projections to Azure blob, table, or files, of enriched data. */
+  knowledgeStore?: SearchIndexerKnowledgeStore;
+  /** Definition of additional projections to secondary search index(es). */
+  indexProjection?: SearchIndexerIndexProjection;
+  /** The ETag of the skillset. */
+  eTag?: string;
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can decrypt your skillset definition. Once you have encrypted your skillset definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your skillset definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+}
+
+export function searchIndexerSkillsetSerializer(item: SearchIndexerSkillset): any {
+  return {
+    name: item["name"],
+    description: item["description"],
+    skills: searchIndexerSkillUnionArraySerializer(item["skills"]),
+    cognitiveServices: !item["cognitiveServicesAccount"]
+      ? item["cognitiveServicesAccount"]
+      : cognitiveServicesAccountUnionSerializer(item["cognitiveServicesAccount"]),
+    knowledgeStore: !item["knowledgeStore"]
+      ? item["knowledgeStore"]
+      : searchIndexerKnowledgeStoreSerializer(item["knowledgeStore"]),
+    indexProjections: !item["indexProjection"]
+      ? item["indexProjection"]
+      : searchIndexerIndexProjectionSerializer(item["indexProjection"]),
+    "@odata.etag": item["eTag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeySerializer(item["encryptionKey"]),
+  };
+}
+
+export function searchIndexerSkillsetDeserializer(item: any): SearchIndexerSkillset {
+  return {
+    name: item["name"],
+    description: item["description"],
+    skills: searchIndexerSkillUnionArrayDeserializer(item["skills"]),
+    cognitiveServicesAccount: !item["cognitiveServices"]
+      ? item["cognitiveServices"]
+      : cognitiveServicesAccountUnionDeserializer(item["cognitiveServices"]),
+    knowledgeStore: !item["knowledgeStore"]
+      ? item["knowledgeStore"]
+      : searchIndexerKnowledgeStoreDeserializer(item["knowledgeStore"]),
+    indexProjection: !item["indexProjections"]
+      ? item["indexProjections"]
+      : searchIndexerIndexProjectionDeserializer(item["indexProjections"]),
+    eTag: item["@odata.etag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]),
+  };
+}
+
+export function searchIndexerSkillUnionArraySerializer(
+  result: Array<SearchIndexerSkillUnion>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerSkillUnionSerializer(item);
+  });
+}
+
+export function searchIndexerSkillUnionArrayDeserializer(
+  result: Array<SearchIndexerSkillUnion>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerSkillUnionDeserializer(item);
+  });
+}
+
+/** Base type for skills. */
+export interface SearchIndexerSkill {
+  /** The discriminator for derived types.
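+   * For example, a split skill carries "#Microsoft.Skills.Text.SplitSkill" here; the
+   * union serializer and deserializer later in this file dispatch on this value.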
+   */
+  /** The discriminator possible values: #Microsoft.Skills.Util.ConditionalSkill, #Microsoft.Skills.Text.KeyPhraseExtractionSkill, #Microsoft.Skills.Vision.OcrSkill, #Microsoft.Skills.Vision.ImageAnalysisSkill, #Microsoft.Skills.Text.LanguageDetectionSkill, #Microsoft.Skills.Util.ShaperSkill, #Microsoft.Skills.Text.MergeSkill, #Microsoft.Skills.Text.EntityRecognitionSkill, #Microsoft.Skills.Text.SentimentSkill, #Microsoft.Skills.Text.V3.SentimentSkill, #Microsoft.Skills.Text.V3.EntityLinkingSkill, #Microsoft.Skills.Text.V3.EntityRecognitionSkill, #Microsoft.Skills.Text.PIIDetectionSkill, #Microsoft.Skills.Text.SplitSkill, #Microsoft.Skills.Text.CustomEntityLookupSkill, #Microsoft.Skills.Text.TranslationSkill, #Microsoft.Skills.Util.DocumentExtractionSkill, #Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill, #Microsoft.Skills.Custom.WebApiSkill, #Microsoft.Skills.Custom.AmlSkill, #Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill, #Microsoft.Skills.Vision.VectorizeSkill, #Microsoft.Skills.Util.ContentUnderstandingSkill, #Microsoft.Skills.Custom.ChatCompletionSkill */
+  odatatype: string;
+  /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */
+  name?: string;
+  /** The description of the skill which describes the inputs, outputs, and usage of the skill. */
+  description?: string;
+  /** Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. */
+  context?: string;
+  /** Inputs of the skill can be a column in the source data set, or the output of an upstream skill. */
+  inputs: InputFieldMappingEntry[];
+  /** The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill.
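+   * For example, an output with targetName "mergedText" on a skill whose context is
+   * "/document" is written to the enrichment tree as "/document/mergedText", where a
+   * downstream skill can reference it as an input source.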
*/ + outputs: OutputFieldMappingEntry[]; +} + +export function searchIndexerSkillSerializer(item: SearchIndexerSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + }; +} + +export function searchIndexerSkillDeserializer(item: any): SearchIndexerSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + }; +} + +/** Alias for SearchIndexerSkillUnion */ +export type SearchIndexerSkillUnion = + | ConditionalSkill + | KeyPhraseExtractionSkill + | OcrSkill + | ImageAnalysisSkill + | LanguageDetectionSkill + | ShaperSkill + | MergeSkill + | EntityRecognitionSkill + | SentimentSkill + | SentimentSkillV3 + | EntityLinkingSkill + | EntityRecognitionSkillV3 + | PIIDetectionSkill + | SplitSkill + | CustomEntityLookupSkill + | TextTranslationSkill + | DocumentExtractionSkill + | DocumentIntelligenceLayoutSkill + | WebApiSkill + | AzureMachineLearningSkill + | AzureOpenAIEmbeddingSkill + | VisionVectorizeSkill + | ContentUnderstandingSkill + | ChatCompletionSkill + | SearchIndexerSkill; + +export function searchIndexerSkillUnionSerializer(item: SearchIndexerSkillUnion): any { + switch (item.odatatype) { + case "#Microsoft.Skills.Util.ConditionalSkill": + return conditionalSkillSerializer(item as ConditionalSkill); + + case "#Microsoft.Skills.Text.KeyPhraseExtractionSkill": + return keyPhraseExtractionSkillSerializer(item as KeyPhraseExtractionSkill); + + case "#Microsoft.Skills.Vision.OcrSkill": + return ocrSkillSerializer(item as OcrSkill); + + case "#Microsoft.Skills.Vision.ImageAnalysisSkill": + return imageAnalysisSkillSerializer(item as ImageAnalysisSkill); + + case "#Microsoft.Skills.Text.LanguageDetectionSkill": + return languageDetectionSkillSerializer(item as LanguageDetectionSkill); + + case "#Microsoft.Skills.Util.ShaperSkill": + return shaperSkillSerializer(item as ShaperSkill); + + case "#Microsoft.Skills.Text.MergeSkill": + return mergeSkillSerializer(item as MergeSkill); + + case "#Microsoft.Skills.Text.EntityRecognitionSkill": + return entityRecognitionSkillSerializer(item as EntityRecognitionSkill); + + case "#Microsoft.Skills.Text.SentimentSkill": + return sentimentSkillSerializer(item as SentimentSkill); + + case "#Microsoft.Skills.Text.V3.SentimentSkill": + return sentimentSkillV3Serializer(item as SentimentSkillV3); + + case "#Microsoft.Skills.Text.V3.EntityLinkingSkill": + return entityLinkingSkillSerializer(item as EntityLinkingSkill); + + case "#Microsoft.Skills.Text.V3.EntityRecognitionSkill": + return entityRecognitionSkillV3Serializer(item as EntityRecognitionSkillV3); + + case "#Microsoft.Skills.Text.PIIDetectionSkill": + return piiDetectionSkillSerializer(item as PIIDetectionSkill); + + case "#Microsoft.Skills.Text.SplitSkill": + return splitSkillSerializer(item as SplitSkill); + + case "#Microsoft.Skills.Text.CustomEntityLookupSkill": + return customEntityLookupSkillSerializer(item as CustomEntityLookupSkill); + + case "#Microsoft.Skills.Text.TranslationSkill": + return textTranslationSkillSerializer(item as TextTranslationSkill); + + case "#Microsoft.Skills.Util.DocumentExtractionSkill": + return 
documentExtractionSkillSerializer(item as DocumentExtractionSkill);
+
+    case "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill":
+      return documentIntelligenceLayoutSkillSerializer(item as DocumentIntelligenceLayoutSkill);
+
+    case "#Microsoft.Skills.Custom.WebApiSkill":
+      return webApiSkillSerializer(item as WebApiSkill);
+
+    case "#Microsoft.Skills.Custom.AmlSkill":
+      return azureMachineLearningSkillSerializer(item as AzureMachineLearningSkill);
+
+    case "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill":
+      return azureOpenAIEmbeddingSkillSerializer(item as AzureOpenAIEmbeddingSkill);
+
+    case "#Microsoft.Skills.Vision.VectorizeSkill":
+      return visionVectorizeSkillSerializer(item as VisionVectorizeSkill);
+
+    case "#Microsoft.Skills.Util.ContentUnderstandingSkill":
+      return contentUnderstandingSkillSerializer(item as ContentUnderstandingSkill);
+
+    case "#Microsoft.Skills.Custom.ChatCompletionSkill":
+      return chatCompletionSkillSerializer(item as ChatCompletionSkill);
+
+    default:
+      return searchIndexerSkillSerializer(item);
+  }
+}
+
+export function searchIndexerSkillUnionDeserializer(item: any): SearchIndexerSkillUnion {
+  switch (item["@odata.type"]) {
+    case "#Microsoft.Skills.Util.ConditionalSkill":
+      return conditionalSkillDeserializer(item as ConditionalSkill);
+
+    case "#Microsoft.Skills.Text.KeyPhraseExtractionSkill":
+      return keyPhraseExtractionSkillDeserializer(item as KeyPhraseExtractionSkill);
+
+    case "#Microsoft.Skills.Vision.OcrSkill":
+      return ocrSkillDeserializer(item as OcrSkill);
+
+    case "#Microsoft.Skills.Vision.ImageAnalysisSkill":
+      return imageAnalysisSkillDeserializer(item as ImageAnalysisSkill);
+
+    case "#Microsoft.Skills.Text.LanguageDetectionSkill":
+      return languageDetectionSkillDeserializer(item as LanguageDetectionSkill);
+
+    case "#Microsoft.Skills.Util.ShaperSkill":
+      return shaperSkillDeserializer(item as ShaperSkill);
+
+    case "#Microsoft.Skills.Text.MergeSkill":
+      return mergeSkillDeserializer(item as MergeSkill);
+
+    case "#Microsoft.Skills.Text.EntityRecognitionSkill":
+      return entityRecognitionSkillDeserializer(item as EntityRecognitionSkill);
+
+    case "#Microsoft.Skills.Text.SentimentSkill":
+      return sentimentSkillDeserializer(item as SentimentSkill);
+
+    case "#Microsoft.Skills.Text.V3.SentimentSkill":
+      return sentimentSkillV3Deserializer(item as SentimentSkillV3);
+
+    case "#Microsoft.Skills.Text.V3.EntityLinkingSkill":
+      return entityLinkingSkillDeserializer(item as EntityLinkingSkill);
+
+    case "#Microsoft.Skills.Text.V3.EntityRecognitionSkill":
+      return entityRecognitionSkillV3Deserializer(item as EntityRecognitionSkillV3);
+
+    case "#Microsoft.Skills.Text.PIIDetectionSkill":
+      return piiDetectionSkillDeserializer(item as PIIDetectionSkill);
+
+    case "#Microsoft.Skills.Text.SplitSkill":
+      return splitSkillDeserializer(item as SplitSkill);
+
+    case "#Microsoft.Skills.Text.CustomEntityLookupSkill":
+      return customEntityLookupSkillDeserializer(item as CustomEntityLookupSkill);
+
+    case "#Microsoft.Skills.Text.TranslationSkill":
+      return textTranslationSkillDeserializer(item as TextTranslationSkill);
+
+    case "#Microsoft.Skills.Util.DocumentExtractionSkill":
+      return documentExtractionSkillDeserializer(item as DocumentExtractionSkill);
+
+    case "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill":
+      return documentIntelligenceLayoutSkillDeserializer(item as DocumentIntelligenceLayoutSkill);
+
+    case "#Microsoft.Skills.Custom.WebApiSkill":
+      return webApiSkillDeserializer(item as WebApiSkill);
+
+    case "#Microsoft.Skills.Custom.AmlSkill":
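+      // Note: raw wire payloads carry the "@odata.type" discriminator (the client-side
+      // `odatatype` name only exists after deserialization), so the switch above reads
+      // item["@odata.type"]. Each arm delegates to the matching concrete deserializer;
+      // unknown discriminators fall through to the base deserializer in the default arm.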
+      return azureMachineLearningSkillDeserializer(item as AzureMachineLearningSkill);
+
+    case "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill":
+      return azureOpenAIEmbeddingSkillDeserializer(item as AzureOpenAIEmbeddingSkill);
+
+    case "#Microsoft.Skills.Vision.VectorizeSkill":
+      return visionVectorizeSkillDeserializer(item as VisionVectorizeSkill);
+
+    case "#Microsoft.Skills.Util.ContentUnderstandingSkill":
+      return contentUnderstandingSkillDeserializer(item as ContentUnderstandingSkill);
+
+    case "#Microsoft.Skills.Custom.ChatCompletionSkill":
+      return chatCompletionSkillDeserializer(item as ChatCompletionSkill);
+
+    default:
+      return searchIndexerSkillDeserializer(item);
+  }
+}
+
+export function inputFieldMappingEntryArraySerializer(
+  result: Array<InputFieldMappingEntry>,
+): any[] {
+  return result.map((item) => {
+    return inputFieldMappingEntrySerializer(item);
+  });
+}
+
+export function inputFieldMappingEntryArrayDeserializer(
+  result: Array<InputFieldMappingEntry>,
+): any[] {
+  return result.map((item) => {
+    return inputFieldMappingEntryDeserializer(item);
+  });
+}
+
+/** Input field mapping for a skill. */
+export interface InputFieldMappingEntry {
+  /** The name of the input. */
+  name: string;
+  /** The source of the input. */
+  source?: string;
+  /** The source context used for selecting recursive inputs. */
+  sourceContext?: string;
+  /** The recursive inputs used when creating a complex type. */
+  inputs?: InputFieldMappingEntry[];
+}
+
+export function inputFieldMappingEntrySerializer(item: InputFieldMappingEntry): any {
+  return {
+    name: item["name"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArraySerializer(item["inputs"]),
+  };
+}
+
+export function inputFieldMappingEntryDeserializer(item: any): InputFieldMappingEntry {
+  return {
+    name: item["name"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+  };
+}
+
+export function outputFieldMappingEntryArraySerializer(
+  result: Array<OutputFieldMappingEntry>,
+): any[] {
+  return result.map((item) => {
+    return outputFieldMappingEntrySerializer(item);
+  });
+}
+
+export function outputFieldMappingEntryArrayDeserializer(
+  result: Array<OutputFieldMappingEntry>,
+): any[] {
+  return result.map((item) => {
+    return outputFieldMappingEntryDeserializer(item);
+  });
+}
+
+/** Output field mapping for a skill. */
+export interface OutputFieldMappingEntry {
+  /** The name of the output defined by the skill. */
+  name: string;
+  /** The target name of the output. It is optional and defaults to the output name. */
+  targetName?: string;
+}
+
+export function outputFieldMappingEntrySerializer(item: OutputFieldMappingEntry): any {
+  return { name: item["name"], targetName: item["targetName"] };
+}
+
+export function outputFieldMappingEntryDeserializer(item: any): OutputFieldMappingEntry {
+  return {
+    name: item["name"],
+    targetName: item["targetName"],
+  };
+}
+
+/** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */
+export interface ConditionalSkill extends SearchIndexerSkill {
+  /** A URI fragment specifying the type of skill.
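+   * Always "#Microsoft.Skills.Util.ConditionalSkill" for this skill type.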
*/ + odatatype: "#Microsoft.Skills.Util.ConditionalSkill"; +} + +export function conditionalSkillSerializer(item: ConditionalSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + }; +} + +export function conditionalSkillDeserializer(item: any): ConditionalSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + }; +} + +/** A skill that uses text analytics for key phrase extraction. */ +export interface KeyPhraseExtractionSkill extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: KeyPhraseExtractionSkillLanguage; + /** A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. */ + maxKeyPhraseCount?: number; + /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */ + modelVersion?: string; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill"; +} + +export function keyPhraseExtractionSkillSerializer(item: KeyPhraseExtractionSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + maxKeyPhraseCount: item["maxKeyPhraseCount"], + modelVersion: item["modelVersion"], + }; +} + +export function keyPhraseExtractionSkillDeserializer(item: any): KeyPhraseExtractionSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + maxKeyPhraseCount: item["maxKeyPhraseCount"], + modelVersion: item["modelVersion"], + }; +} + +/** The language codes supported for input text by KeyPhraseExtractionSkill. */ +export enum KnownKeyPhraseExtractionSkillLanguage { + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Korean */ + Ko = "ko", + /** Norwegian (Bokmaal) */ + No = "no", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Portuguese (Brazil) */ + PtBR = "pt-BR", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", +} + +/** + * The language codes supported for input text by KeyPhraseExtractionSkill. \ + * {@link KnownKeyPhraseExtractionSkillLanguage} can be used interchangeably with KeyPhraseExtractionSkillLanguage, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service
+ * **da**: Danish \
+ * **nl**: Dutch \
+ * **en**: English \
+ * **fi**: Finnish \
+ * **fr**: French \
+ * **de**: German \
+ * **it**: Italian \
+ * **ja**: Japanese \
+ * **ko**: Korean \
+ * **no**: Norwegian (Bokmaal) \
+ * **pl**: Polish \
+ * **pt-PT**: Portuguese (Portugal) \
+ * **pt-BR**: Portuguese (Brazil) \
+ * **ru**: Russian \
+ * **es**: Spanish \
+ * **sv**: Swedish
+ */
+export type KeyPhraseExtractionSkillLanguage = string;
+
+/** A skill that extracts text from image files. */
+export interface OcrSkill extends SearchIndexerSkill {
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: OcrSkillLanguage;
+  /** A value indicating whether to turn on orientation detection. Default is false. */
+  shouldDetectOrientation?: boolean;
+  /** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". */
+  lineEnding?: OcrLineEnding;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Vision.OcrSkill";
+}
+
+export function ocrSkillSerializer(item: OcrSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    detectOrientation: item["shouldDetectOrientation"],
+    lineEnding: item["lineEnding"],
+  };
+}
+
+export function ocrSkillDeserializer(item: any): OcrSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    shouldDetectOrientation: item["detectOrientation"],
+    lineEnding: item["lineEnding"],
+  };
+}
+
+/** The language codes supported for input by OcrSkill.
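+ *
+ * For illustration only (field values below are hypothetical), a minimal OcrSkill
+ * that pins the recognition language could look like:
+ * ```ts
+ * const ocr: OcrSkill = {
+ *   odatatype: "#Microsoft.Skills.Vision.OcrSkill",
+ *   context: "/document/normalized_images/*",
+ *   defaultLanguageCode: KnownOcrSkillLanguage.De,
+ *   inputs: [{ name: "image", source: "/document/normalized_images/*" }],
+ *   outputs: [{ name: "text", targetName: "ocrText" }],
+ * };
+ * ```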
*/ +export enum KnownOcrSkillLanguage { + /** Afrikaans */ + Af = "af", + /** Albanian */ + Sq = "sq", + /** Angika (Devanagiri) */ + Anp = "anp", + /** Arabic */ + Ar = "ar", + /** Asturian */ + Ast = "ast", + /** Awadhi-Hindi (Devanagiri) */ + Awa = "awa", + /** Azerbaijani (Latin) */ + Az = "az", + /** Bagheli */ + Bfy = "bfy", + /** Basque */ + Eu = "eu", + /** Belarusian (Cyrillic and Latin) */ + Be = "be", + /** Belarusian (Cyrillic) */ + BeCyrl = "be-cyrl", + /** Belarusian (Latin) */ + BeLatn = "be-latn", + /** Bhojpuri-Hindi (Devanagiri) */ + Bho = "bho", + /** Bislama */ + Bi = "bi", + /** Bodo (Devanagiri) */ + Brx = "brx", + /** Bosnian Latin */ + Bs = "bs", + /** Brajbha */ + Bra = "bra", + /** Breton */ + Br = "br", + /** Bulgarian */ + Bg = "bg", + /** Bundeli */ + Bns = "bns", + /** Buryat (Cyrillic) */ + Bua = "bua", + /** Catalan */ + Ca = "ca", + /** Cebuano */ + Ceb = "ceb", + /** Chamling */ + Rab = "rab", + /** Chamorro */ + Ch = "ch", + /** Chhattisgarhi (Devanagiri) */ + Hne = "hne", + /** Chinese Simplified */ + ZhHans = "zh-Hans", + /** Chinese Traditional */ + ZhHant = "zh-Hant", + /** Cornish */ + Kw = "kw", + /** Corsican */ + Co = "co", + /** Crimean Tatar (Latin) */ + Crh = "crh", + /** Croatian */ + Hr = "hr", + /** Czech */ + Cs = "cs", + /** Danish */ + Da = "da", + /** Dari */ + Prs = "prs", + /** Dhimal (Devanagiri) */ + Dhi = "dhi", + /** Dogri (Devanagiri) */ + Doi = "doi", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Erzya (Cyrillic) */ + Myv = "myv", + /** Estonian */ + Et = "et", + /** Faroese */ + Fo = "fo", + /** Fijian */ + Fj = "fj", + /** Filipino */ + Fil = "fil", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** Frulian */ + Fur = "fur", + /** Gagauz (Latin) */ + Gag = "gag", + /** Galician */ + Gl = "gl", + /** German */ + De = "de", + /** Gilbertese */ + Gil = "gil", + /** Gondi (Devanagiri) */ + Gon = "gon", + /** Greek */ + El = "el", + /** Greenlandic */ + Kl = "kl", + /** Gurung (Devanagiri) */ + Gvr = "gvr", + /** Haitian Creole */ + Ht = "ht", + /** Halbi (Devanagiri) */ + Hlb = "hlb", + /** Hani */ + Hni = "hni", + /** Haryanvi */ + Bgc = "bgc", + /** Hawaiian */ + Haw = "haw", + /** Hindi */ + Hi = "hi", + /** Hmong Daw (Latin) */ + Mww = "mww", + /** Ho (Devanagiri) */ + Hoc = "hoc", + /** Hungarian */ + Hu = "hu", + /** Icelandic */ + Is = "is", + /** Inari Sami */ + Smn = "smn", + /** Indonesian */ + Id = "id", + /** Interlingua */ + Ia = "ia", + /** Inuktitut (Latin) */ + Iu = "iu", + /** Irish */ + Ga = "ga", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Jaunsari (Devanagiri) */ + Jns = "Jns", + /** Javanese */ + Jv = "jv", + /** Kabuverdianu */ + Kea = "kea", + /** Kachin (Latin) */ + Kac = "kac", + /** Kangri (Devanagiri) */ + Xnr = "xnr", + /** Karachay-Balkar */ + Krc = "krc", + /** Kara-Kalpak (Cyrillic) */ + KaaCyrl = "kaa-cyrl", + /** Kara-Kalpak (Latin) */ + Kaa = "kaa", + /** Kashubian */ + Csb = "csb", + /** Kazakh (Cyrillic) */ + KkCyrl = "kk-cyrl", + /** Kazakh (Latin) */ + KkLatn = "kk-latn", + /** Khaling */ + Klr = "klr", + /** Khasi */ + Kha = "kha", + /** K'iche' */ + Quc = "quc", + /** Korean */ + Ko = "ko", + /** Korku */ + Kfq = "kfq", + /** Koryak */ + Kpy = "kpy", + /** Kosraean */ + Kos = "kos", + /** Kumyk (Cyrillic) */ + Kum = "kum", + /** Kurdish (Arabic) */ + KuArab = "ku-arab", + /** Kurdish (Latin) */ + KuLatn = "ku-latn", + /** Kurukh (Devanagiri) */ + Kru = "kru", + /** Kyrgyz (Cyrillic) */ + Ky = "ky", + /** Lakota */ + Lkt = "lkt", + /** 
Latin */ + La = "la", + /** Lithuanian */ + Lt = "lt", + /** Lower Sorbian */ + Dsb = "dsb", + /** Lule Sami */ + Smj = "smj", + /** Luxembourgish */ + Lb = "lb", + /** Mahasu Pahari (Devanagiri) */ + Bfz = "bfz", + /** Malay (Latin) */ + Ms = "ms", + /** Maltese */ + Mt = "mt", + /** Malto (Devanagiri) */ + Kmj = "kmj", + /** Manx */ + Gv = "gv", + /** Maori */ + Mi = "mi", + /** Marathi */ + Mr = "mr", + /** Mongolian (Cyrillic) */ + Mn = "mn", + /** Montenegrin (Cyrillic) */ + CnrCyrl = "cnr-cyrl", + /** Montenegrin (Latin) */ + CnrLatn = "cnr-latn", + /** Neapolitan */ + Nap = "nap", + /** Nepali */ + Ne = "ne", + /** Niuean */ + Niu = "niu", + /** Nogay */ + Nog = "nog", + /** Northern Sami (Latin) */ + Sme = "sme", + /** Norwegian */ + Nb = "nb", + /** Norwegian */ + No = "no", + /** Occitan */ + Oc = "oc", + /** Ossetic */ + Os = "os", + /** Pashto */ + Ps = "ps", + /** Persian */ + Fa = "fa", + /** Polish */ + Pl = "pl", + /** Portuguese */ + Pt = "pt", + /** Punjabi (Arabic) */ + Pa = "pa", + /** Ripuarian */ + Ksh = "ksh", + /** Romanian */ + Ro = "ro", + /** Romansh */ + Rm = "rm", + /** Russian */ + Ru = "ru", + /** Sadri (Devanagiri) */ + Sck = "sck", + /** Samoan (Latin) */ + Sm = "sm", + /** Sanskrit (Devanagiri) */ + Sa = "sa", + /** Santali (Devanagiri) */ + Sat = "sat", + /** Scots */ + Sco = "sco", + /** Scottish Gaelic */ + Gd = "gd", + /** Serbian (Latin) */ + Sr = "sr", + /** Serbian (Cyrillic) */ + SrCyrl = "sr-Cyrl", + /** Serbian (Latin) */ + SrLatn = "sr-Latn", + /** Sherpa (Devanagiri) */ + Xsr = "xsr", + /** Sirmauri (Devanagiri) */ + Srx = "srx", + /** Skolt Sami */ + Sms = "sms", + /** Slovak */ + Sk = "sk", + /** Slovenian */ + Sl = "sl", + /** Somali (Arabic) */ + So = "so", + /** Southern Sami */ + Sma = "sma", + /** Spanish */ + Es = "es", + /** Swahili (Latin) */ + Sw = "sw", + /** Swedish */ + Sv = "sv", + /** Tajik (Cyrillic) */ + Tg = "tg", + /** Tatar (Latin) */ + Tt = "tt", + /** Tetum */ + Tet = "tet", + /** Thangmi */ + Thf = "thf", + /** Tongan */ + To = "to", + /** Turkish */ + Tr = "tr", + /** Turkmen (Latin) */ + Tk = "tk", + /** Tuvan */ + Tyv = "tyv", + /** Upper Sorbian */ + Hsb = "hsb", + /** Urdu */ + Ur = "ur", + /** Uyghur (Arabic) */ + Ug = "ug", + /** Uzbek (Arabic) */ + UzArab = "uz-arab", + /** Uzbek (Cyrillic) */ + UzCyrl = "uz-cyrl", + /** Uzbek (Latin) */ + Uz = "uz", + /** Volapük */ + Vo = "vo", + /** Walser */ + Wae = "wae", + /** Welsh */ + Cy = "cy", + /** Western Frisian */ + Fy = "fy", + /** Yucatec Maya */ + Yua = "yua", + /** Zhuang */ + Za = "za", + /** Zulu */ + Zu = "zu", + /** Unknown (All) */ + Unk = "unk", +} + +/** + * The language codes supported for input by OcrSkill. \ + * {@link KnownOcrSkillLanguage} can be used interchangeably with OcrSkillLanguage, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service + * **af**: Afrikaans \ + * **sq**: Albanian \ + * **anp**: Angika (Devanagiri) \ + * **ar**: Arabic \ + * **ast**: Asturian \ + * **awa**: Awadhi-Hindi (Devanagiri) \ + * **az**: Azerbaijani (Latin) \ + * **bfy**: Bagheli \ + * **eu**: Basque \ + * **be**: Belarusian (Cyrillic and Latin) \ + * **be-cyrl**: Belarusian (Cyrillic) \ + * **be-latn**: Belarusian (Latin) \ + * **bho**: Bhojpuri-Hindi (Devanagiri) \ + * **bi**: Bislama \ + * **brx**: Bodo (Devanagiri) \ + * **bs**: Bosnian Latin \ + * **bra**: Brajbha \ + * **br**: Breton \ + * **bg**: Bulgarian \ + * **bns**: Bundeli \ + * **bua**: Buryat (Cyrillic) \ + * **ca**: Catalan \ + * **ceb**: Cebuano \ + * **rab**: Chamling \ + * **ch**: Chamorro \ + * **hne**: Chhattisgarhi (Devanagiri) \ + * **zh-Hans**: Chinese Simplified \ + * **zh-Hant**: Chinese Traditional \ + * **kw**: Cornish \ + * **co**: Corsican \ + * **crh**: Crimean Tatar (Latin) \ + * **hr**: Croatian \ + * **cs**: Czech \ + * **da**: Danish \ + * **prs**: Dari \ + * **dhi**: Dhimal (Devanagiri) \ + * **doi**: Dogri (Devanagiri) \ + * **nl**: Dutch \ + * **en**: English \ + * **myv**: Erzya (Cyrillic) \ + * **et**: Estonian \ + * **fo**: Faroese \ + * **fj**: Fijian \ + * **fil**: Filipino \ + * **fi**: Finnish \ + * **fr**: French \ + * **fur**: Frulian \ + * **gag**: Gagauz (Latin) \ + * **gl**: Galician \ + * **de**: German \ + * **gil**: Gilbertese \ + * **gon**: Gondi (Devanagiri) \ + * **el**: Greek \ + * **kl**: Greenlandic \ + * **gvr**: Gurung (Devanagiri) \ + * **ht**: Haitian Creole \ + * **hlb**: Halbi (Devanagiri) \ + * **hni**: Hani \ + * **bgc**: Haryanvi \ + * **haw**: Hawaiian \ + * **hi**: Hindi \ + * **mww**: Hmong Daw (Latin) \ + * **hoc**: Ho (Devanagiri) \ + * **hu**: Hungarian \ + * **is**: Icelandic \ + * **smn**: Inari Sami \ + * **id**: Indonesian \ + * **ia**: Interlingua \ + * **iu**: Inuktitut (Latin) \ + * **ga**: Irish \ + * **it**: Italian \ + * **ja**: Japanese \ + * **Jns**: Jaunsari (Devanagiri) \ + * **jv**: Javanese \ + * **kea**: Kabuverdianu \ + * **kac**: Kachin (Latin) \ + * **xnr**: Kangri (Devanagiri) \ + * **krc**: Karachay-Balkar \ + * **kaa-cyrl**: Kara-Kalpak (Cyrillic) \ + * **kaa**: Kara-Kalpak (Latin) \ + * **csb**: Kashubian \ + * **kk-cyrl**: Kazakh (Cyrillic) \ + * **kk-latn**: Kazakh (Latin) \ + * **klr**: Khaling \ + * **kha**: Khasi \ + * **quc**: K'iche' \ + * **ko**: Korean \ + * **kfq**: Korku \ + * **kpy**: Koryak \ + * **kos**: Kosraean \ + * **kum**: Kumyk (Cyrillic) \ + * **ku-arab**: Kurdish (Arabic) \ + * **ku-latn**: Kurdish (Latin) \ + * **kru**: Kurukh (Devanagiri) \ + * **ky**: Kyrgyz (Cyrillic) \ + * **lkt**: Lakota \ + * **la**: Latin \ + * **lt**: Lithuanian \ + * **dsb**: Lower Sorbian \ + * **smj**: Lule Sami \ + * **lb**: Luxembourgish \ + * **bfz**: Mahasu Pahari (Devanagiri) \ + * **ms**: Malay (Latin) \ + * **mt**: Maltese \ + * **kmj**: Malto (Devanagiri) \ + * **gv**: Manx \ + * **mi**: Maori \ + * **mr**: Marathi \ + * **mn**: Mongolian (Cyrillic) \ + * **cnr-cyrl**: Montenegrin (Cyrillic) \ + * **cnr-latn**: Montenegrin (Latin) \ + * **nap**: Neapolitan \ + * **ne**: Nepali \ + * **niu**: Niuean \ + * **nog**: Nogay \ + * **sme**: Northern Sami (Latin) \ + * **nb**: Norwegian \ + * **no**: Norwegian \ + * **oc**: Occitan \ + * **os**: Ossetic \ + * **ps**: Pashto \ + * **fa**: Persian \ + * **pl**: Polish \ + * **pt**: Portuguese \ + * **pa**: Punjabi (Arabic) \ + * **ksh**: Ripuarian \ + * **ro**: Romanian \ + * **rm**: Romansh \ + * **ru**: 
Russian \
+ * **sck**: Sadri (Devanagiri) \
+ * **sm**: Samoan (Latin) \
+ * **sa**: Sanskrit (Devanagiri) \
+ * **sat**: Santali (Devanagiri) \
+ * **sco**: Scots \
+ * **gd**: Scottish Gaelic \
+ * **sr**: Serbian (Latin) \
+ * **sr-Cyrl**: Serbian (Cyrillic) \
+ * **sr-Latn**: Serbian (Latin) \
+ * **xsr**: Sherpa (Devanagiri) \
+ * **srx**: Sirmauri (Devanagiri) \
+ * **sms**: Skolt Sami \
+ * **sk**: Slovak \
+ * **sl**: Slovenian \
+ * **so**: Somali (Arabic) \
+ * **sma**: Southern Sami \
+ * **es**: Spanish \
+ * **sw**: Swahili (Latin) \
+ * **sv**: Swedish \
+ * **tg**: Tajik (Cyrillic) \
+ * **tt**: Tatar (Latin) \
+ * **tet**: Tetum \
+ * **thf**: Thangmi \
+ * **to**: Tongan \
+ * **tr**: Turkish \
+ * **tk**: Turkmen (Latin) \
+ * **tyv**: Tuvan \
+ * **hsb**: Upper Sorbian \
+ * **ur**: Urdu \
+ * **ug**: Uyghur (Arabic) \
+ * **uz-arab**: Uzbek (Arabic) \
+ * **uz-cyrl**: Uzbek (Cyrillic) \
+ * **uz**: Uzbek (Latin) \
+ * **vo**: Volapük \
+ * **wae**: Walser \
+ * **cy**: Welsh \
+ * **fy**: Western Frisian \
+ * **yua**: Yucatec Maya \
+ * **za**: Zhuang \
+ * **zu**: Zulu \
+ * **unk**: Unknown (All)
+ */
+export type OcrSkillLanguage = string;
+
+/** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". */
+export enum KnownOcrLineEnding {
+  /** Lines are separated by a single space character. */
+  Space = "space",
+  /** Lines are separated by a carriage return ('\r') character. */
+  CarriageReturn = "carriageReturn",
+  /** Lines are separated by a single line feed ('\n') character. */
+  LineFeed = "lineFeed",
+  /** Lines are separated by a carriage return and a line feed ('\r\n') character. */
+  CarriageReturnLineFeed = "carriageReturnLineFeed",
+}
+
+/**
+ * Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". \
+ * {@link KnownOcrLineEnding} can be used interchangeably with OcrLineEnding,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **space**: Lines are separated by a single space character. \
+ * **carriageReturn**: Lines are separated by a carriage return ('\r') character. \
+ * **lineFeed**: Lines are separated by a single line feed ('\n') character. \
+ * **carriageReturnLineFeed**: Lines are separated by a carriage return and a line feed ('\r\n') character.
+ */
+export type OcrLineEnding = string;
+
+/** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */
+export interface ImageAnalysisSkill extends SearchIndexerSkill {
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: ImageAnalysisSkillLanguage;
+  /** A list of visual features. */
+  visualFeatures?: VisualFeature[];
+  /** A string indicating which domain-specific details to return. */
+  details?: ImageDetail[];
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill";
+}
+
+export function imageAnalysisSkillSerializer(item: ImageAnalysisSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    visualFeatures: !item["visualFeatures"]
+      ?
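+        // visualFeatures/details are plain string arrays, so the map below is an
+        // identity copy rather than a nested serialization.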
item["visualFeatures"] + : item["visualFeatures"].map((p: any) => { + return p; + }), + details: !item["details"] + ? item["details"] + : item["details"].map((p: any) => { + return p; + }), + }; +} + +export function imageAnalysisSkillDeserializer(item: any): ImageAnalysisSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + visualFeatures: !item["visualFeatures"] + ? item["visualFeatures"] + : item["visualFeatures"].map((p: any) => { + return p; + }), + details: !item["details"] + ? item["details"] + : item["details"].map((p: any) => { + return p; + }), + }; +} + +/** The language codes supported for input by ImageAnalysisSkill. */ +export enum KnownImageAnalysisSkillLanguage { + /** Arabic */ + Ar = "ar", + /** Azerbaijani */ + Az = "az", + /** Bulgarian */ + Bg = "bg", + /** Bosnian Latin */ + Bs = "bs", + /** Catalan */ + Ca = "ca", + /** Czech */ + Cs = "cs", + /** Welsh */ + Cy = "cy", + /** Danish */ + Da = "da", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** English */ + En = "en", + /** Spanish */ + Es = "es", + /** Estonian */ + Et = "et", + /** Basque */ + Eu = "eu", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** Irish */ + Ga = "ga", + /** Galician */ + Gl = "gl", + /** Hebrew */ + He = "he", + /** Hindi */ + Hi = "hi", + /** Croatian */ + Hr = "hr", + /** Hungarian */ + Hu = "hu", + /** Indonesian */ + Id = "id", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Kazakh */ + Kk = "kk", + /** Korean */ + Ko = "ko", + /** Lithuanian */ + Lt = "lt", + /** Latvian */ + Lv = "lv", + /** Macedonian */ + Mk = "mk", + /** Malay Malaysia */ + Ms = "ms", + /** Norwegian (Bokmal) */ + Nb = "nb", + /** Dutch */ + Nl = "nl", + /** Polish */ + Pl = "pl", + /** Dari */ + Prs = "prs", + /** Portuguese-Brazil */ + PtBR = "pt-BR", + /** Portuguese-Portugal */ + Pt = "pt", + /** Portuguese-Portugal */ + PtPT = "pt-PT", + /** Romanian */ + Ro = "ro", + /** Russian */ + Ru = "ru", + /** Slovak */ + Sk = "sk", + /** Slovenian */ + Sl = "sl", + /** Serbian - Cyrillic RS */ + SrCyrl = "sr-Cyrl", + /** Serbian - Latin RS */ + SrLatn = "sr-Latn", + /** Swedish */ + Sv = "sv", + /** Thai */ + Th = "th", + /** Turkish */ + Tr = "tr", + /** Ukrainian */ + Uk = "uk", + /** Vietnamese */ + Vi = "vi", + /** Chinese Simplified */ + Zh = "zh", + /** Chinese Simplified */ + ZhHans = "zh-Hans", + /** Chinese Traditional */ + ZhHant = "zh-Hant", +} + +/** + * The language codes supported for input by ImageAnalysisSkill. \ + * {@link KnownImageAnalysisSkillLanguage} can be used interchangeably with ImageAnalysisSkillLanguage, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service + * **ar**: Arabic \ + * **az**: Azerbaijani \ + * **bg**: Bulgarian \ + * **bs**: Bosnian Latin \ + * **ca**: Catalan \ + * **cs**: Czech \ + * **cy**: Welsh \ + * **da**: Danish \ + * **de**: German \ + * **el**: Greek \ + * **en**: English \ + * **es**: Spanish \ + * **et**: Estonian \ + * **eu**: Basque \ + * **fi**: Finnish \ + * **fr**: French \ + * **ga**: Irish \ + * **gl**: Galician \ + * **he**: Hebrew \ + * **hi**: Hindi \ + * **hr**: Croatian \ + * **hu**: Hungarian \ + * **id**: Indonesian \ + * **it**: Italian \ + * **ja**: Japanese \ + * **kk**: Kazakh \ + * **ko**: Korean \ + * **lt**: Lithuanian \ + * **lv**: Latvian \ + * **mk**: Macedonian \ + * **ms**: Malay Malaysia \ + * **nb**: Norwegian (Bokmal) \ + * **nl**: Dutch \ + * **pl**: Polish \ + * **prs**: Dari \ + * **pt-BR**: Portuguese-Brazil \ + * **pt**: Portuguese-Portugal \ + * **pt-PT**: Portuguese-Portugal \ + * **ro**: Romanian \ + * **ru**: Russian \ + * **sk**: Slovak \ + * **sl**: Slovenian \ + * **sr-Cyrl**: Serbian - Cyrillic RS \ + * **sr-Latn**: Serbian - Latin RS \ + * **sv**: Swedish \ + * **th**: Thai \ + * **tr**: Turkish \ + * **uk**: Ukrainian \ + * **vi**: Vietnamese \ + * **zh**: Chinese Simplified \ + * **zh-Hans**: Chinese Simplified \ + * **zh-Hant**: Chinese Traditional + */ +export type ImageAnalysisSkillLanguage = string; + +/** The strings indicating what visual feature types to return. */ +export enum KnownVisualFeature { + /** Visual features recognized as adult persons. */ + Adult = "adult", + /** Visual features recognized as commercial brands. */ + Brands = "brands", + /** Categories. */ + Categories = "categories", + /** Description. */ + Description = "description", + /** Visual features recognized as people faces. */ + Faces = "faces", + /** Visual features recognized as objects. */ + Objects = "objects", + /** Tags. */ + Tags = "tags", +} + +/** + * The strings indicating what visual feature types to return. \ + * {@link KnownVisualFeature} can be used interchangeably with VisualFeature, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **adult**: Visual features recognized as adult persons. \ + * **brands**: Visual features recognized as commercial brands. \ + * **categories**: Categories. \ + * **description**: Description. \ + * **faces**: Visual features recognized as people faces. \ + * **objects**: Visual features recognized as objects. \ + * **tags**: Tags. + */ +export type VisualFeature = string; + +/** A string indicating which domain-specific details to return. */ +export enum KnownImageDetail { + /** Details recognized as celebrities. */ + Celebrities = "celebrities", + /** Details recognized as landmarks. */ + Landmarks = "landmarks", +} + +/** + * A string indicating which domain-specific details to return. \ + * {@link KnownImageDetail} can be used interchangeably with ImageDetail, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **celebrities**: Details recognized as celebrities. \ + * **landmarks**: Details recognized as landmarks. + */ +export type ImageDetail = string; + +/** A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. 
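+ *
+ * A minimal, illustrative definition (source paths are hypothetical):
+ * ```ts
+ * const detect: LanguageDetectionSkill = {
+ *   odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill",
+ *   context: "/document",
+ *   inputs: [{ name: "text", source: "/document/content" }],
+ *   outputs: [{ name: "languageCode", targetName: "languageCode" }],
+ * };
+ * ```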
*/ +export interface LanguageDetectionSkill extends SearchIndexerSkill { + /** A country code to use as a hint to the language detection model if it cannot disambiguate the language. */ + defaultCountryHint?: string; + /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */ + modelVersion?: string; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill"; +} + +export function languageDetectionSkillSerializer(item: LanguageDetectionSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultCountryHint: item["defaultCountryHint"], + modelVersion: item["modelVersion"], + }; +} + +export function languageDetectionSkillDeserializer(item: any): LanguageDetectionSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultCountryHint: item["defaultCountryHint"], + modelVersion: item["modelVersion"], + }; +} + +/** A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). */ +export interface ShaperSkill extends SearchIndexerSkill { + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Util.ShaperSkill"; +} + +export function shaperSkillSerializer(item: ShaperSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + }; +} + +export function shaperSkillDeserializer(item: any): ShaperSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + }; +} + +/** A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. */ +export interface MergeSkill extends SearchIndexerSkill { + /** The tag indicates the start of the merged text. By default, the tag is an empty space. */ + insertPreTag?: string; + /** The tag indicates the end of the merged text. By default, the tag is an empty space. */ + insertPostTag?: string; + /** A URI fragment specifying the type of skill. 
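+   * Always "#Microsoft.Skills.Text.MergeSkill" for this skill type.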
*/
+  odatatype: "#Microsoft.Skills.Text.MergeSkill";
+}
+
+export function mergeSkillSerializer(item: MergeSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    insertPreTag: item["insertPreTag"],
+    insertPostTag: item["insertPostTag"],
+  };
+}
+
+export function mergeSkillDeserializer(item: any): MergeSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    insertPreTag: item["insertPreTag"],
+    insertPostTag: item["insertPostTag"],
+  };
+}
+
+/** This skill is deprecated. Use the V3.EntityRecognitionSkill instead. */
+export interface EntityRecognitionSkill extends SearchIndexerSkill {
+  /** A list of entity categories that should be extracted. */
+  categories?: EntityCategory[];
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: EntityRecognitionSkillLanguage;
+  /** Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null, or set to false, entities which don't conform to one of the pre-defined types will not be surfaced. */
+  includeTypelessEntities?: boolean;
+  /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
+  minimumPrecision?: number;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill";
+}
+
+export function entityRecognitionSkillSerializer(item: EntityRecognitionSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    categories: !item["categories"]
+      ? item["categories"]
+      : item["categories"].map((p: any) => {
+          return p;
+        }),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    includeTypelessEntities: item["includeTypelessEntities"],
+    minimumPrecision: item["minimumPrecision"],
+  };
+}
+
+export function entityRecognitionSkillDeserializer(item: any): EntityRecognitionSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    categories: !item["categories"]
+      ? item["categories"]
+      : item["categories"].map((p: any) => {
+          return p;
+        }),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    includeTypelessEntities: item["includeTypelessEntities"],
+    minimumPrecision: item["minimumPrecision"],
+  };
+}
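Editor's note: to make the generated shapes above concrete, here is a minimal usage sketch (not part of this diff) that builds a MergeSkill and serializes it. The I/O names `text`, `itemsToInsert`, and `mergedText` follow the service's documented conventions; the source paths and context are assumptions for the sketch.

```ts
// Illustrative sketch only: stitch OCR'd image text back into document content.
const mergeSkill: MergeSkill = {
  odatatype: "#Microsoft.Skills.Text.MergeSkill",
  context: "/document",
  insertPreTag: " ",
  insertPostTag: " ",
  inputs: [
    { name: "text", source: "/document/content" },
    { name: "itemsToInsert", source: "/document/normalized_images/*/text" },
  ],
  outputs: [{ name: "mergedText", targetName: "merged_text" }],
};

// The serializer maps the client shape onto the wire shape; note how the
// `odatatype` discriminator becomes the "@odata.type" property.
const payload = mergeSkillSerializer(mergeSkill);
```

+
+/** A string indicating what entity categories to return. */
+export enum KnownEntityCategory {
+  /** Entities describing a physical location. */
+  Location = "location",
+  /** Entities describing an organization. */
+  Organization = "organization",
+  /** Entities describing a person.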
*/ + Person = "person", + /** Entities describing a quantity. */ + Quantity = "quantity", + /** Entities describing a date and time. */ + Datetime = "datetime", + /** Entities describing a URL. */ + Url = "url", + /** Entities describing an email address. */ + Email = "email", +} + +/** + * A string indicating what entity categories to return. \ + * {@link KnownEntityCategory} can be used interchangeably with EntityCategory, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **location**: Entities describing a physical location. \ + * **organization**: Entities describing an organization. \ + * **person**: Entities describing a person. \ + * **quantity**: Entities describing a quantity. \ + * **datetime**: Entities describing a date and time. \ + * **url**: Entities describing a URL. \ + * **email**: Entities describing an email address. + */ +export type EntityCategory = string; + +/** Deprecated. The language codes supported for input text by EntityRecognitionSkill. */ +export enum KnownEntityRecognitionSkillLanguage { + /** Arabic */ + Ar = "ar", + /** Czech */ + Cs = "cs", + /** Chinese-Simplified */ + ZhHans = "zh-Hans", + /** Chinese-Traditional */ + ZhHant = "zh-Hant", + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Hungarian */ + Hu = "hu", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Korean */ + Ko = "ko", + /** Norwegian (Bokmaal) */ + No = "no", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Portuguese (Brazil) */ + PtBR = "pt-BR", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Turkish */ + Tr = "tr", +} + +/** + * Deprecated. The language codes supported for input text by EntityRecognitionSkill. \ + * {@link KnownEntityRecognitionSkillLanguage} can be used interchangeably with EntityRecognitionSkillLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **ar**: Arabic \ + * **cs**: Czech \ + * **zh-Hans**: Chinese-Simplified \ + * **zh-Hant**: Chinese-Traditional \ + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **el**: Greek \ + * **hu**: Hungarian \ + * **it**: Italian \ + * **ja**: Japanese \ + * **ko**: Korean \ + * **no**: Norwegian (Bokmaal) \ + * **pl**: Polish \ + * **pt-PT**: Portuguese (Portugal) \ + * **pt-BR**: Portuguese (Brazil) \ + * **ru**: Russian \ + * **es**: Spanish \ + * **sv**: Swedish \ + * **tr**: Turkish + */ +export type EntityRecognitionSkillLanguage = string; + +/** This skill is deprecated. Use the V3.SentimentSkill instead. */ +export interface SentimentSkill extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: SentimentSkillLanguage; + /** A URI fragment specifying the type of skill. 
*/ + odatatype: "#Microsoft.Skills.Text.SentimentSkill"; +} + +export function sentimentSkillSerializer(item: SentimentSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + }; +} + +export function sentimentSkillDeserializer(item: any): SentimentSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + }; +} + +/** Deprecated. The language codes supported for input text by SentimentSkill. */ +export enum KnownSentimentSkillLanguage { + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Italian */ + It = "it", + /** Norwegian (Bokmaal) */ + No = "no", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Turkish */ + Tr = "tr", +} + +/** + * Deprecated. The language codes supported for input text by SentimentSkill. \ + * {@link KnownSentimentSkillLanguage} can be used interchangeably with SentimentSkillLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **el**: Greek \ + * **it**: Italian \ + * **no**: Norwegian (Bokmaal) \ + * **pl**: Polish \ + * **pt-PT**: Portuguese (Portugal) \ + * **ru**: Russian \ + * **es**: Spanish \ + * **sv**: Swedish \ + * **tr**: Turkish + */ +export type SentimentSkillLanguage = string; + +/** Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels (such as "negative", "neutral" and "positive") based on the highest confidence score found by the service at a sentence and document-level. */ +export interface SentimentSkillV3 extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: string; + /** If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. */ + includeOpinionMining?: boolean; + /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */ + modelVersion?: string; + /** A URI fragment specifying the type of skill. 
*/
+  odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill";
+}
+
+export function sentimentSkillV3Serializer(item: SentimentSkillV3): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    includeOpinionMining: item["includeOpinionMining"],
+    modelVersion: item["modelVersion"],
+  };
+}
+
+export function sentimentSkillV3Deserializer(item: any): SentimentSkillV3 {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    includeOpinionMining: item["includeOpinionMining"],
+    modelVersion: item["modelVersion"],
+  };
+}
+
+/** Using the Text Analytics API, extracts linked entities from text. */
+export interface EntityLinkingSkill extends SearchIndexerSkill {
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: string;
+  /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
+  minimumPrecision?: number;
+  /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
+  modelVersion?: string;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Text.V3.EntityLinkingSkill";
+}
+
+export function entityLinkingSkillSerializer(item: EntityLinkingSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    minimumPrecision: item["minimumPrecision"],
+    modelVersion: item["modelVersion"],
+  };
+}
+
+export function entityLinkingSkillDeserializer(item: any): EntityLinkingSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    minimumPrecision: item["minimumPrecision"],
+    modelVersion: item["modelVersion"],
+  };
+}
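Editor's note: a minimal sketch (not part of this diff) of configuring the SentimentSkillV3 defined above with opinion mining enabled. The I/O names `text` and `sentiment` follow the service's documented conventions but are assumptions here.

```ts
// Illustrative sketch only: document-level sentiment with opinion mining.
const sentimentSkill: SentimentSkillV3 = {
  odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill",
  context: "/document",
  defaultLanguageCode: "en",
  includeOpinionMining: true,
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "sentiment", targetName: "sentiment" }],
};
```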
+/** Using the Text Analytics API, extracts entities of different types from text. */
+export interface EntityRecognitionSkillV3 extends SearchIndexerSkill {
+  /** A list of entity categories that should be extracted. */
+  categories?: string[];
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: string;
+  /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
+  minimumPrecision?: number;
+  /** The version of the model to use when calling the Text Analytics API. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
+  modelVersion?: string;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Text.V3.EntityRecognitionSkill";
+}
+
+export function entityRecognitionSkillV3Serializer(item: EntityRecognitionSkillV3): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    categories: !item["categories"]
+      ? item["categories"]
+      : item["categories"].map((p: any) => {
+          return p;
+        }),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    minimumPrecision: item["minimumPrecision"],
+    modelVersion: item["modelVersion"],
+  };
+}
+
+export function entityRecognitionSkillV3Deserializer(item: any): EntityRecognitionSkillV3 {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    categories: !item["categories"]
+      ? item["categories"]
+      : item["categories"].map((p: any) => {
+          return p;
+        }),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    minimumPrecision: item["minimumPrecision"],
+    modelVersion: item["modelVersion"],
+  };
+}
+
+/** Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it. */
+export interface PIIDetectionSkill extends SearchIndexerSkill {
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: string;
+  /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
+  minimumPrecision?: number;
+  /** A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. */
+  maskingMode?: PIIDetectionSkillMaskingMode;
+  /** The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'. */
+  mask?: string;
+  /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
+  modelVersion?: string;
+  /** A list of PII entity categories that should be extracted and masked. */
+  piiCategories?: string[];
+  /** If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. */
+  domain?: string;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill";
+}
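Editor's note: a minimal sketch (not part of this diff) of a PIIDetectionSkill that redacts detected entities. The output name `maskedText` follows the service's documented convention; the precision threshold and paths are assumptions.

```ts
// Illustrative sketch only: replace detected PII with "*" characters.
const piiSkill: PIIDetectionSkill = {
  odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill",
  context: "/document",
  defaultLanguageCode: "en",
  maskingMode: KnownPIIDetectionSkillMaskingMode.Replace,
  mask: "*",
  minimumPrecision: 0.5,
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "maskedText", targetName: "redacted_content" }],
};
```

Because masking repeats the character to the detected entity's length, offsets in `maskedText` line up with the original text, which matters when downstream skills reference positions.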
+export function piiDetectionSkillSerializer(item: PIIDetectionSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    minimumPrecision: item["minimumPrecision"],
+    maskingMode: item["maskingMode"],
+    maskingCharacter: item["mask"],
+    modelVersion: item["modelVersion"],
+    piiCategories: !item["piiCategories"]
+      ? item["piiCategories"]
+      : item["piiCategories"].map((p: any) => {
+          return p;
+        }),
+    domain: item["domain"],
+  };
+}
+
+export function piiDetectionSkillDeserializer(item: any): PIIDetectionSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    minimumPrecision: item["minimumPrecision"],
+    maskingMode: item["maskingMode"],
+    mask: item["maskingCharacter"],
+    modelVersion: item["modelVersion"],
+    piiCategories: !item["piiCategories"]
+      ? item["piiCategories"]
+      : item["piiCategories"].map((p: any) => {
+          return p;
+        }),
+    domain: item["domain"],
+  };
+}
+
+/** A string indicating what maskingMode to use to mask the personal information detected in the input text. */
+export enum KnownPIIDetectionSkillMaskingMode {
+  /** No masking occurs and the maskedText output will not be returned. */
+  None = "none",
+  /** Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText. */
+  Replace = "replace",
+}
+
+/**
+ * A string indicating what maskingMode to use to mask the personal information detected in the input text. \
+ * {@link KnownPIIDetectionSkillMaskingMode} can be used interchangeably with PIIDetectionSkillMaskingMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: No masking occurs and the maskedText output will not be returned. \
+ * **replace**: Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText.
+ */
+export type PIIDetectionSkillMaskingMode = string;
+
+/** A skill to split a string into chunks of text. */
+export interface SplitSkill extends SearchIndexerSkill {
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: SplitSkillLanguage;
+  /** A value indicating which split mode to perform. */
+  textSplitMode?: TextSplitMode;
+  /** The desired maximum page length. Default is 10000. */
+  maximumPageLength?: number;
+  /** Only applicable when textSplitMode is set to 'pages'. If specified, the (n+1)th chunk will start with this number of characters/tokens from the end of the nth chunk. */
+  pageOverlapLength?: number;
+  /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */
+  maximumPagesToTake?: number;
+  /** Only applies if textSplitMode is set to 'pages'. There are two possible values; the choice determines how length (maximumPageLength and pageOverlapLength) is measured. The default is 'characters', which means length is measured in characters. */
+  unit?: SplitSkillUnit;
+  /** Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. */
+  azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Text.SplitSkill";
+}
+
+export function splitSkillSerializer(item: SplitSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    textSplitMode: item["textSplitMode"],
+    maximumPageLength: item["maximumPageLength"],
+    pageOverlapLength: item["pageOverlapLength"],
+    maximumPagesToTake: item["maximumPagesToTake"],
+    unit: item["unit"],
+    azureOpenAITokenizerParameters: !item["azureOpenAITokenizerParameters"]
+      ? item["azureOpenAITokenizerParameters"]
+      : azureOpenAITokenizerParametersSerializer(item["azureOpenAITokenizerParameters"]),
+  };
+}
+
+export function splitSkillDeserializer(item: any): SplitSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    textSplitMode: item["textSplitMode"],
+    maximumPageLength: item["maximumPageLength"],
+    pageOverlapLength: item["pageOverlapLength"],
+    maximumPagesToTake: item["maximumPagesToTake"],
+    unit: item["unit"],
+    azureOpenAITokenizerParameters: !item["azureOpenAITokenizerParameters"]
+      ? item["azureOpenAITokenizerParameters"]
+      : azureOpenAITokenizerParametersDeserializer(item["azureOpenAITokenizerParameters"]),
+  };
+}
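Editor's note: a minimal sketch (not part of this diff) of a SplitSkill chunking content into overlapping pages, a common setup for vector-search pipelines. The I/O names `text` and `textItems` follow the service's documented conventions; the lengths are assumptions.

```ts
// Illustrative sketch only: ~2000-character pages with 200-character overlap.
const splitSkill: SplitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  context: "/document",
  textSplitMode: KnownTextSplitMode.Pages,
  maximumPageLength: 2000,
  pageOverlapLength: 200,
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "pages" }],
};
```

+
+/** The language codes supported for input text by SplitSkill.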
*/ +export enum KnownSplitSkillLanguage { + /** Amharic */ + Am = "am", + /** Bosnian */ + Bs = "bs", + /** Czech */ + Cs = "cs", + /** Danish */ + Da = "da", + /** German */ + De = "de", + /** English */ + En = "en", + /** Spanish */ + Es = "es", + /** Estonian */ + Et = "et", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** Hebrew */ + He = "he", + /** Hindi */ + Hi = "hi", + /** Croatian */ + Hr = "hr", + /** Hungarian */ + Hu = "hu", + /** Indonesian */ + Id = "id", + /** Icelandic */ + Is = "is", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Korean */ + Ko = "ko", + /** Latvian */ + Lv = "lv", + /** Norwegian */ + Nb = "nb", + /** Dutch */ + Nl = "nl", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + Pt = "pt", + /** Portuguese (Brazil) */ + PtBr = "pt-br", + /** Russian */ + Ru = "ru", + /** Slovak */ + Sk = "sk", + /** Slovenian */ + Sl = "sl", + /** Serbian */ + Sr = "sr", + /** Swedish */ + Sv = "sv", + /** Turkish */ + Tr = "tr", + /** Urdu */ + Ur = "ur", + /** Chinese (Simplified) */ + Zh = "zh", +} + +/** + * The language codes supported for input text by SplitSkill. \ + * {@link KnownSplitSkillLanguage} can be used interchangeably with SplitSkillLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **am**: Amharic \ + * **bs**: Bosnian \ + * **cs**: Czech \ + * **da**: Danish \ + * **de**: German \ + * **en**: English \ + * **es**: Spanish \ + * **et**: Estonian \ + * **fi**: Finnish \ + * **fr**: French \ + * **he**: Hebrew \ + * **hi**: Hindi \ + * **hr**: Croatian \ + * **hu**: Hungarian \ + * **id**: Indonesian \ + * **is**: Icelandic \ + * **it**: Italian \ + * **ja**: Japanese \ + * **ko**: Korean \ + * **lv**: Latvian \ + * **nb**: Norwegian \ + * **nl**: Dutch \ + * **pl**: Polish \ + * **pt**: Portuguese (Portugal) \ + * **pt-br**: Portuguese (Brazil) \ + * **ru**: Russian \ + * **sk**: Slovak \ + * **sl**: Slovenian \ + * **sr**: Serbian \ + * **sv**: Swedish \ + * **tr**: Turkish \ + * **ur**: Urdu \ + * **zh**: Chinese (Simplified) + */ +export type SplitSkillLanguage = string; + +/** A value indicating which split mode to perform. */ +export enum KnownTextSplitMode { + /** Split the text into individual pages. */ + Pages = "pages", + /** Split the text into individual sentences. */ + Sentences = "sentences", +} + +/** + * A value indicating which split mode to perform. \ + * {@link KnownTextSplitMode} can be used interchangeably with TextSplitMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **pages**: Split the text into individual pages. \ + * **sentences**: Split the text into individual sentences. + */ +export type TextSplitMode = string; + +/** A value indicating which unit to use. */ +export enum KnownSplitSkillUnit { + /** The length will be measured by character. */ + Characters = "characters", + /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */ + AzureOpenAITokens = "azureOpenAITokens", +} + +/** + * A value indicating which unit to use. \ + * {@link KnownSplitSkillUnit} can be used interchangeably with SplitSkillUnit, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **characters**: The length will be measured by character. \ + * **azureOpenAITokens**: The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. 
+ */
+export type SplitSkillUnit = string;
+
+/** Azure OpenAI Tokenizer parameters. */
+export interface AzureOpenAITokenizerParameters {
+  /** Only applies if the unit is set to azureOpenAITokens. Options include 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. */
+  encoderModelName?: SplitSkillEncoderModelName;
+  /** (Optional) Only applies if the unit is set to azureOpenAITokens. This parameter defines a collection of special tokens that are permitted within the tokenization process. */
+  allowedSpecialTokens?: string[];
+}
+
+export function azureOpenAITokenizerParametersSerializer(
+  item: AzureOpenAITokenizerParameters,
+): any {
+  return {
+    encoderModelName: item["encoderModelName"],
+    allowedSpecialTokens: !item["allowedSpecialTokens"]
+      ? item["allowedSpecialTokens"]
+      : item["allowedSpecialTokens"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function azureOpenAITokenizerParametersDeserializer(
+  item: any,
+): AzureOpenAITokenizerParameters {
+  return {
+    encoderModelName: item["encoderModelName"],
+    allowedSpecialTokens: !item["allowedSpecialTokens"]
+      ? item["allowedSpecialTokens"]
+      : item["allowedSpecialTokens"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+/** A value indicating which tokenizer to use. */
+export enum KnownSplitSkillEncoderModelName {
+  /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */
+  R50KBase = "r50k_base",
+  /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */
+  P50KBase = "p50k_base",
+  /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */
+  P50KEdit = "p50k_edit",
+  /** A base model with a 100,000 token vocabulary. */
+  CL100KBase = "cl100k_base",
+}
+
+/**
+ * A value indicating which tokenizer to use. \
+ * {@link KnownSplitSkillEncoderModelName} can be used interchangeably with SplitSkillEncoderModelName,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **r50k_base**: Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. \
+ * **p50k_base**: A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. \
+ * **p50k_edit**: Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. \
+ * **cl100k_base**: A base model with a 100,000 token vocabulary.
+ */
+export type SplitSkillEncoderModelName = string;
+
+/** A skill that looks for text from a custom, user-defined list of words and phrases. */
+export interface CustomEntityLookupSkill extends SearchIndexerSkill {
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: CustomEntityLookupSkillLanguage;
+  /** Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. */
+  entitiesDefinitionUri?: string;
+  /** The inline CustomEntity definition. */
+  inlineEntitiesDefinition?: CustomEntity[];
+  /** A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, this value will be the default value. */
+  globalDefaultCaseSensitive?: boolean;
+  /** A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. */
+  globalDefaultAccentSensitive?: boolean;
+  /** A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. */
+  globalDefaultFuzzyEditDistance?: number;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill";
+}
+
+export function customEntityLookupSkillSerializer(item: CustomEntityLookupSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    entitiesDefinitionUri: item["entitiesDefinitionUri"],
+    inlineEntitiesDefinition: !item["inlineEntitiesDefinition"]
+      ? item["inlineEntitiesDefinition"]
+      : customEntityArraySerializer(item["inlineEntitiesDefinition"]),
+    globalDefaultCaseSensitive: item["globalDefaultCaseSensitive"],
+    globalDefaultAccentSensitive: item["globalDefaultAccentSensitive"],
+    globalDefaultFuzzyEditDistance: item["globalDefaultFuzzyEditDistance"],
+  };
+}
+
+export function customEntityLookupSkillDeserializer(item: any): CustomEntityLookupSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    entitiesDefinitionUri: item["entitiesDefinitionUri"],
+    inlineEntitiesDefinition: !item["inlineEntitiesDefinition"]
+      ? item["inlineEntitiesDefinition"]
+      : customEntityArrayDeserializer(item["inlineEntitiesDefinition"]),
+    globalDefaultCaseSensitive: item["globalDefaultCaseSensitive"],
+    globalDefaultAccentSensitive: item["globalDefaultAccentSensitive"],
+    globalDefaultFuzzyEditDistance: item["globalDefaultFuzzyEditDistance"],
+  };
+}
+
+/** The language codes supported for input text by CustomEntityLookupSkill. */
+export enum KnownCustomEntityLookupSkillLanguage {
+  /** Danish */
+  Da = "da",
+  /** German */
+  De = "de",
+  /** English */
+  En = "en",
+  /** Spanish */
+  Es = "es",
+  /** Finnish */
+  Fi = "fi",
+  /** French */
+  Fr = "fr",
+  /** Italian */
+  It = "it",
+  /** Korean */
+  Ko = "ko",
+  /** Portuguese */
+  Pt = "pt",
+}
+
+/**
+ * The language codes supported for input text by CustomEntityLookupSkill. \
+ * {@link KnownCustomEntityLookupSkillLanguage} can be used interchangeably with CustomEntityLookupSkillLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **da**: Danish \
+ * **de**: German \
+ * **en**: English \
+ * **es**: Spanish \
+ * **fi**: Finnish \
+ * **fr**: French \
+ * **it**: Italian \
+ * **ko**: Korean \
+ * **pt**: Portuguese
+ */
+export type CustomEntityLookupSkillLanguage = string;
+
+export function customEntityArraySerializer(result: Array<CustomEntity>): any[] {
+  return result.map((item) => {
+    return customEntitySerializer(item);
+  });
+}
+
+export function customEntityArrayDeserializer(result: Array<CustomEntity>): any[] {
+  return result.map((item) => {
+    return customEntityDeserializer(item);
+  });
+}
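Editor's note: the `Array<...>` type parameters above were dropped somewhere in the diff's rendering and have been restored following the generator's usual pattern. For orientation, here is a minimal sketch (not part of this diff) of a CustomEntityLookupSkill with a small inline dictionary; the entity values and paths are invented for the sketch, and the output name `entities` follows the service's documented convention.

```ts
// Illustrative sketch only: match a product name and one alias, allowing
// one character of fuziness-free edit distance.
const lookupSkill: CustomEntityLookupSkill = {
  odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill",
  context: "/document",
  defaultLanguageCode: KnownCustomEntityLookupSkillLanguage.En,
  inlineEntitiesDefinition: [
    {
      name: "Contoso",
      fuzzyEditDistance: 1,
      aliases: [{ text: "Contoso Ltd.", caseSensitive: false }],
    },
  ],
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "entities", targetName: "matched_entities" }],
};
```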
+/** An object that contains information about the matches that were found, and related metadata. */
+export interface CustomEntity {
+  /** The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the "normalized" form of the text being found. */
+  name: string;
+  /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
+  description?: string;
+  /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
+  type?: string;
+  /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
+  subtype?: string;
+  /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
+  id?: string;
+  /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case insensitive matches of "Microsoft" could be: microsoft, microSoft, MICROSOFT. */
+  caseSensitive?: boolean;
+  /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to accent. */
+  accentSensitive?: boolean;
+  /** Defaults to 0. Maximum value of 5. Denotes the acceptable number of divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. For instance, if the edit distance is set to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do. */
+  fuzzyEditDistance?: number;
+  /** Changes the default case sensitivity value for this entity. It can be used to change the default value of all aliases caseSensitive values. */
+  defaultCaseSensitive?: boolean;
+  /** Changes the default accent sensitivity value for this entity. It can be used to change the default value of all aliases accentSensitive values. */
+  defaultAccentSensitive?: boolean;
+  /** Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases fuzzyEditDistance values. */
+  defaultFuzzyEditDistance?: number;
+  /** An array of complex objects that can be used to specify alternative spellings or synonyms to the root entity name. */
+  aliases?: CustomEntityAlias[];
+}
+
+export function customEntitySerializer(item: CustomEntity): any {
+  return {
+    name: item["name"],
+    description: item["description"],
+    type: item["type"],
+    subtype: item["subtype"],
+    id: item["id"],
+    caseSensitive: item["caseSensitive"],
+    accentSensitive: item["accentSensitive"],
+    fuzzyEditDistance: item["fuzzyEditDistance"],
+    defaultCaseSensitive: item["defaultCaseSensitive"],
+    defaultAccentSensitive: item["defaultAccentSensitive"],
+    defaultFuzzyEditDistance: item["defaultFuzzyEditDistance"],
+    aliases: !item["aliases"]
+      ? item["aliases"]
+      : customEntityAliasArraySerializer(item["aliases"]),
+  };
+}
+export function customEntityDeserializer(item: any): CustomEntity {
+  return {
+    name: item["name"],
+    description: item["description"],
+    type: item["type"],
+    subtype: item["subtype"],
+    id: item["id"],
+    caseSensitive: item["caseSensitive"],
+    accentSensitive: item["accentSensitive"],
+    fuzzyEditDistance: item["fuzzyEditDistance"],
+    defaultCaseSensitive: item["defaultCaseSensitive"],
+    defaultAccentSensitive: item["defaultAccentSensitive"],
+    defaultFuzzyEditDistance: item["defaultFuzzyEditDistance"],
+    aliases: !item["aliases"]
+      ? item["aliases"]
+      : customEntityAliasArrayDeserializer(item["aliases"]),
+  };
+}
+
+export function customEntityAliasArraySerializer(result: Array<CustomEntityAlias>): any[] {
+  return result.map((item) => {
+    return customEntityAliasSerializer(item);
+  });
+}
+
+export function customEntityAliasArrayDeserializer(result: Array<CustomEntityAlias>): any[] {
+  return result.map((item) => {
+    return customEntityAliasDeserializer(item);
+  });
+}
+
+/** A complex object that can be used to specify alternative spellings or synonyms to the root entity name. */
+export interface CustomEntityAlias {
+  /** The text of the alias. */
+  text: string;
+  /** Determine if the alias is case sensitive. */
+  caseSensitive?: boolean;
+  /** Determine if the alias is accent sensitive. */
+  accentSensitive?: boolean;
+  /** Determine the fuzzy edit distance of the alias. */
+  fuzzyEditDistance?: number;
+}
+
+export function customEntityAliasSerializer(item: CustomEntityAlias): any {
+  return {
+    text: item["text"],
+    caseSensitive: item["caseSensitive"],
+    accentSensitive: item["accentSensitive"],
+    fuzzyEditDistance: item["fuzzyEditDistance"],
+  };
+}
+
+export function customEntityAliasDeserializer(item: any): CustomEntityAlias {
+  return {
+    text: item["text"],
+    caseSensitive: item["caseSensitive"],
+    accentSensitive: item["accentSensitive"],
+    fuzzyEditDistance: item["fuzzyEditDistance"],
+  };
+}
+
+/** A skill to translate text from one language to another. */
+export interface TextTranslationSkill extends SearchIndexerSkill {
+  /** The language code to translate documents into for documents that don't specify the to language explicitly. */
+  defaultToLanguageCode: TextTranslationSkillLanguage;
+  /** The language code to translate documents from for documents that don't specify the from language explicitly. */
+  defaultFromLanguageCode?: TextTranslationSkillLanguage;
+  /** The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is `en`. */
+  suggestedFrom?: TextTranslationSkillLanguage;
+  /** A URI fragment specifying the type of skill.
*/ + odatatype: "#Microsoft.Skills.Text.TranslationSkill"; +} + +export function textTranslationSkillSerializer(item: TextTranslationSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultToLanguageCode: item["defaultToLanguageCode"], + defaultFromLanguageCode: item["defaultFromLanguageCode"], + suggestedFrom: item["suggestedFrom"], + }; +} + +export function textTranslationSkillDeserializer(item: any): TextTranslationSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultToLanguageCode: item["defaultToLanguageCode"], + defaultFromLanguageCode: item["defaultFromLanguageCode"], + suggestedFrom: item["suggestedFrom"], + }; +} + +/** The language codes supported for input text by TextTranslationSkill. */ +export enum KnownTextTranslationSkillLanguage { + /** Afrikaans */ + Af = "af", + /** Arabic */ + Ar = "ar", + /** Bangla */ + Bn = "bn", + /** Bosnian (Latin) */ + Bs = "bs", + /** Bulgarian */ + Bg = "bg", + /** Cantonese (Traditional) */ + Yue = "yue", + /** Catalan */ + Ca = "ca", + /** Chinese Simplified */ + ZhHans = "zh-Hans", + /** Chinese Traditional */ + ZhHant = "zh-Hant", + /** Croatian */ + Hr = "hr", + /** Czech */ + Cs = "cs", + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Estonian */ + Et = "et", + /** Fijian */ + Fj = "fj", + /** Filipino */ + Fil = "fil", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Haitian Creole */ + Ht = "ht", + /** Hebrew */ + He = "he", + /** Hindi */ + Hi = "hi", + /** Hmong Daw */ + Mww = "mww", + /** Hungarian */ + Hu = "hu", + /** Icelandic */ + Is = "is", + /** Indonesian */ + Id = "id", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Kiswahili */ + Sw = "sw", + /** Klingon */ + Tlh = "tlh", + /** Klingon (Latin script) */ + TlhLatn = "tlh-Latn", + /** Klingon (Klingon script) */ + TlhPiqd = "tlh-Piqd", + /** Korean */ + Ko = "ko", + /** Latvian */ + Lv = "lv", + /** Lithuanian */ + Lt = "lt", + /** Malagasy */ + Mg = "mg", + /** Malay */ + Ms = "ms", + /** Maltese */ + Mt = "mt", + /** Norwegian */ + Nb = "nb", + /** Persian */ + Fa = "fa", + /** Polish */ + Pl = "pl", + /** Portuguese */ + Pt = "pt", + /** Portuguese (Brazil) */ + PtBr = "pt-br", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Queretaro Otomi */ + Otq = "otq", + /** Romanian */ + Ro = "ro", + /** Russian */ + Ru = "ru", + /** Samoan */ + Sm = "sm", + /** Serbian (Cyrillic) */ + SrCyrl = "sr-Cyrl", + /** Serbian (Latin) */ + SrLatn = "sr-Latn", + /** Slovak */ + Sk = "sk", + /** Slovenian */ + Sl = "sl", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Tahitian */ + Ty = "ty", + /** Tamil */ + Ta = "ta", + /** Telugu */ + Te = "te", + /** Thai */ + Th = "th", + /** Tongan */ + To = "to", + /** Turkish */ + Tr = "tr", + /** Ukrainian */ + Uk = "uk", + /** Urdu */ + Ur = "ur", + /** Vietnamese */ + Vi = "vi", + /** Welsh */ + Cy = "cy", + /** Yucatec Maya */ + Yua = "yua", + /** Irish */ + Ga = "ga", + /** Kannada */ + Kn = "kn", + /** Maori */ + Mi = "mi", + /** 
Malayalam */
+  Ml = "ml",
+  /** Punjabi */
+  Pa = "pa",
+}
+
+/**
+ * The language codes supported for input text by TextTranslationSkill. \
+ * {@link KnownTextTranslationSkillLanguage} can be used interchangeably with TextTranslationSkillLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **af**: Afrikaans \
+ * **ar**: Arabic \
+ * **bn**: Bangla \
+ * **bs**: Bosnian (Latin) \
+ * **bg**: Bulgarian \
+ * **yue**: Cantonese (Traditional) \
+ * **ca**: Catalan \
+ * **zh-Hans**: Chinese Simplified \
+ * **zh-Hant**: Chinese Traditional \
+ * **hr**: Croatian \
+ * **cs**: Czech \
+ * **da**: Danish \
+ * **nl**: Dutch \
+ * **en**: English \
+ * **et**: Estonian \
+ * **fj**: Fijian \
+ * **fil**: Filipino \
+ * **fi**: Finnish \
+ * **fr**: French \
+ * **de**: German \
+ * **el**: Greek \
+ * **ht**: Haitian Creole \
+ * **he**: Hebrew \
+ * **hi**: Hindi \
+ * **mww**: Hmong Daw \
+ * **hu**: Hungarian \
+ * **is**: Icelandic \
+ * **id**: Indonesian \
+ * **it**: Italian \
+ * **ja**: Japanese \
+ * **sw**: Kiswahili \
+ * **tlh**: Klingon \
+ * **tlh-Latn**: Klingon (Latin script) \
+ * **tlh-Piqd**: Klingon (Klingon script) \
+ * **ko**: Korean \
+ * **lv**: Latvian \
+ * **lt**: Lithuanian \
+ * **mg**: Malagasy \
+ * **ms**: Malay \
+ * **mt**: Maltese \
+ * **nb**: Norwegian \
+ * **fa**: Persian \
+ * **pl**: Polish \
+ * **pt**: Portuguese \
+ * **pt-br**: Portuguese (Brazil) \
+ * **pt-PT**: Portuguese (Portugal) \
+ * **otq**: Queretaro Otomi \
+ * **ro**: Romanian \
+ * **ru**: Russian \
+ * **sm**: Samoan \
+ * **sr-Cyrl**: Serbian (Cyrillic) \
+ * **sr-Latn**: Serbian (Latin) \
+ * **sk**: Slovak \
+ * **sl**: Slovenian \
+ * **es**: Spanish \
+ * **sv**: Swedish \
+ * **ty**: Tahitian \
+ * **ta**: Tamil \
+ * **te**: Telugu \
+ * **th**: Thai \
+ * **to**: Tongan \
+ * **tr**: Turkish \
+ * **uk**: Ukrainian \
+ * **ur**: Urdu \
+ * **vi**: Vietnamese \
+ * **cy**: Welsh \
+ * **yua**: Yucatec Maya \
+ * **ga**: Irish \
+ * **kn**: Kannada \
+ * **mi**: Maori \
+ * **ml**: Malayalam \
+ * **pa**: Punjabi
+ */
+export type TextTranslationSkillLanguage = string;
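Editor's note: a minimal sketch (not part of this diff) of the TextTranslationSkill declared earlier. The output name `translatedText` follows the service's documented convention; the target field name is an assumption.

```ts
// Illustrative sketch only: translate each document's content into English,
// relying on automatic detection of the source language.
const translationSkill: TextTranslationSkill = {
  odatatype: "#Microsoft.Skills.Text.TranslationSkill",
  context: "/document",
  defaultToLanguageCode: KnownTextTranslationSkillLanguage.En,
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "translatedText", targetName: "content_en" }],
};
```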
+/** A skill that extracts content from a file within the enrichment pipeline. */
+export interface DocumentExtractionSkill extends SearchIndexerSkill {
+  /** The parsingMode for the skill. Will be set to 'default' if not defined. */
+  parsingMode?: string;
+  /** The type of data to be extracted for the skill. Will be set to 'contentAndMetadata' if not defined. */
+  dataToExtract?: string;
+  /** A dictionary of configurations for the skill. */
+  configuration?: Record<string, any>;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Util.DocumentExtractionSkill";
+}
+
+export function documentExtractionSkillSerializer(item: DocumentExtractionSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    parsingMode: item["parsingMode"],
+    dataToExtract: item["dataToExtract"],
+    configuration: item["configuration"],
+  };
+}
+
+export function documentExtractionSkillDeserializer(item: any): DocumentExtractionSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    parsingMode: item["parsingMode"],
+    dataToExtract: item["dataToExtract"],
+    configuration: item["configuration"],
+  };
+}
+
+/** A skill that extracts content and layout information, via Azure AI Services, from files within the enrichment pipeline. */
+export interface DocumentIntelligenceLayoutSkill extends SearchIndexerSkill {
+  /** Controls the cardinality of the output format. Default is 'markdown'. */
+  outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat;
+  /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */
+  outputMode?: DocumentIntelligenceLayoutSkillOutputMode;
+  /** The depth of headers in the markdown output. Default is h6. */
+  markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth;
+  /** Controls the cardinality of the content extracted from the document by the skill. */
+  extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[];
+  /** Controls the cardinality for chunking the content. */
+  chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill";
+}
+
+export function documentIntelligenceLayoutSkillSerializer(
+  item: DocumentIntelligenceLayoutSkill,
+): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    outputFormat: item["outputFormat"],
+    outputMode: item["outputMode"],
+    markdownHeaderDepth: item["markdownHeaderDepth"],
+    extractionOptions: !item["extractionOptions"]
+      ? item["extractionOptions"]
+      : item["extractionOptions"].map((p: any) => {
+          return p;
+        }),
+    chunkingProperties: !item["chunkingProperties"]
+      ? item["chunkingProperties"]
+      : documentIntelligenceLayoutSkillChunkingPropertiesSerializer(item["chunkingProperties"]),
+  };
+}
+
+export function documentIntelligenceLayoutSkillDeserializer(
+  item: any,
+): DocumentIntelligenceLayoutSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    outputFormat: item["outputFormat"],
+    outputMode: item["outputMode"],
+    markdownHeaderDepth: item["markdownHeaderDepth"],
+    extractionOptions: !item["extractionOptions"]
+      ?
item["extractionOptions"] + : item["extractionOptions"].map((p: any) => { + return p; + }), + chunkingProperties: !item["chunkingProperties"] + ? item["chunkingProperties"] + : documentIntelligenceLayoutSkillChunkingPropertiesDeserializer(item["chunkingProperties"]), + }; +} + +/** Controls the cardinality of the output format. Default is 'markdown'. */ +export enum KnownDocumentIntelligenceLayoutSkillOutputFormat { + /** Specify the format of the output as text. */ + Text = "text", + /** Specify the format of the output as markdown. */ + Markdown = "markdown", +} + +/** + * Controls the cardinality of the output format. Default is 'markdown'. \ + * {@link KnownDocumentIntelligenceLayoutSkillOutputFormat} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputFormat, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **text**: Specify the format of the output as text. \ + * **markdown**: Specify the format of the output as markdown. + */ +export type DocumentIntelligenceLayoutSkillOutputFormat = string; + +/** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */ +export enum KnownDocumentIntelligenceLayoutSkillOutputMode { + /** Specify that the output should be parsed as 'oneToMany'. */ + OneToMany = "oneToMany", +} + +/** + * Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. \ + * {@link KnownDocumentIntelligenceLayoutSkillOutputMode} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **oneToMany**: Specify that the output should be parsed as 'oneToMany'. + */ +export type DocumentIntelligenceLayoutSkillOutputMode = string; + +/** The depth of headers in the markdown output. Default is h6. */ +export enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth { + /** Header level 1. */ + H1 = "h1", + /** Header level 2. */ + H2 = "h2", + /** Header level 3. */ + H3 = "h3", + /** Header level 4. */ + H4 = "h4", + /** Header level 5. */ + H5 = "h5", + /** Header level 6. */ + H6 = "h6", +} + +/** + * The depth of headers in the markdown output. Default is h6. \ + * {@link KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth} can be used interchangeably with DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **h1**: Header level 1. \ + * **h2**: Header level 2. \ + * **h3**: Header level 3. \ + * **h4**: Header level 4. \ + * **h5**: Header level 5. \ + * **h6**: Header level 6. + */ +export type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string; + +/** Controls the cardinality of the content extracted from the document by the skill. */ +export enum KnownDocumentIntelligenceLayoutSkillExtractionOptions { + /** Specify that image content should be extracted from the document. */ + Images = "images", + /** Specify that location metadata should be extracted from the document. */ + LocationMetadata = "locationMetadata", +} + +/** + * Controls the cardinality of the content extracted from the document by the skill. \ + * {@link KnownDocumentIntelligenceLayoutSkillExtractionOptions} can be used interchangeably with DocumentIntelligenceLayoutSkillExtractionOptions, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service
+ * **images**: Specify that image content should be extracted from the document. \
+ * **locationMetadata**: Specify that location metadata should be extracted from the document.
+ */
+export type DocumentIntelligenceLayoutSkillExtractionOptions = string;
+
+/** Controls the cardinality for chunking the content. */
+export interface DocumentIntelligenceLayoutSkillChunkingProperties {
+  /** The unit of the chunk. */
+  unit?: DocumentIntelligenceLayoutSkillChunkingUnit;
+  /** The maximum chunk length in characters. Default is 500. */
+  maximumLength?: number;
+  /** The length of overlap provided between two text chunks. Default is 0. */
+  overlapLength?: number;
+}
+
+export function documentIntelligenceLayoutSkillChunkingPropertiesSerializer(
+  item: DocumentIntelligenceLayoutSkillChunkingProperties,
+): any {
+  return {
+    unit: item["unit"],
+    maximumLength: item["maximumLength"],
+    overlapLength: item["overlapLength"],
+  };
+}
+
+export function documentIntelligenceLayoutSkillChunkingPropertiesDeserializer(
+  item: any,
+): DocumentIntelligenceLayoutSkillChunkingProperties {
+  return {
+    unit: item["unit"],
+    maximumLength: item["maximumLength"],
+    overlapLength: item["overlapLength"],
+  };
+}
+
+/** Controls the cardinality of the chunk unit. Default is 'characters'. */
+export enum KnownDocumentIntelligenceLayoutSkillChunkingUnit {
+  /** Specifies chunk by characters. */
+  Characters = "characters",
+}
+
+/**
+ * Controls the cardinality of the chunk unit. Default is 'characters'. \
+ * {@link KnownDocumentIntelligenceLayoutSkillChunkingUnit} can be used interchangeably with DocumentIntelligenceLayoutSkillChunkingUnit,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **characters**: Specifies chunk by characters.
+ */
+export type DocumentIntelligenceLayoutSkillChunkingUnit = string;
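Editor's note: a minimal sketch (not part of this diff) pulling the layout-skill pieces together: markdown output chunked into overlapping character spans. The `file_data` input and `markdown_document` output names follow the service's documented conventions; the sizes are assumptions.

```ts
// Illustrative sketch only: markdown extraction with 500-character chunks
// and a 50-character overlap between consecutive chunks.
const layoutSkill: DocumentIntelligenceLayoutSkill = {
  odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill",
  context: "/document",
  outputFormat: KnownDocumentIntelligenceLayoutSkillOutputFormat.Markdown,
  outputMode: KnownDocumentIntelligenceLayoutSkillOutputMode.OneToMany,
  markdownHeaderDepth: KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth.H3,
  chunkingProperties: {
    unit: KnownDocumentIntelligenceLayoutSkillChunkingUnit.Characters,
    maximumLength: 500,
    overlapLength: 50,
  },
  inputs: [{ name: "file_data", source: "/document/file_data" }],
  outputs: [{ name: "markdown_document", targetName: "markdown_document" }],
};
```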
+/** A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. */
+export interface WebApiSkill extends SearchIndexerSkill {
+  /** The url for the Web API. */
+  uri: string;
+  /** The headers required to make the http request. */
+  httpHeaders?: Record<string, string>;
+  /** The method for the http request. */
+  httpMethod?: string;
+  /** The desired timeout for the request. Default is 30 seconds. */
+  timeout?: string;
+  /** The desired batch size which indicates number of documents. */
+  batchSize?: number;
+  /** If set, the number of parallel calls that can be made to the Web API. */
+  degreeOfParallelism?: number;
+  /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the custom skill connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */
+  authResourceId?: string;
+  /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
+  authIdentity?: SearchIndexerDataIdentityUnion;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Custom.WebApiSkill";
+}
+
+export function webApiSkillSerializer(item: WebApiSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    uri: item["uri"],
+    httpHeaders: item["httpHeaders"],
+    httpMethod: item["httpMethod"],
+    timeout: item["timeout"],
+    batchSize: item["batchSize"],
+    degreeOfParallelism: item["degreeOfParallelism"],
+    authResourceId: item["authResourceId"],
+    authIdentity: !item["authIdentity"]
+      ? item["authIdentity"]
+      : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]),
+  };
+}
+
+export function webApiSkillDeserializer(item: any): WebApiSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    uri: item["uri"],
+    httpHeaders: item["httpHeaders"],
+    httpMethod: item["httpMethod"],
+    timeout: item["timeout"],
+    batchSize: item["batchSize"],
+    degreeOfParallelism: item["degreeOfParallelism"],
+    authResourceId: item["authResourceId"],
+    authIdentity: !item["authIdentity"]
+      ? item["authIdentity"]
+      : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]),
+  };
+}
+
+/** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */
+export interface AzureMachineLearningSkill extends SearchIndexerSkill {
+  /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
+  scoringUri?: string;
+  /** (Required for key authentication) The key for the AML service. */
+  authenticationKey?: string;
+  /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */
+  resourceId?: string;
+  /** (Optional) When specified, indicates the timeout for the http client making the API call. */
+  timeout?: string;
+  /** (Optional for token authentication). The region the AML service is deployed in. */
+  region?: string;
+  /** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */
+  degreeOfParallelism?: number;
+  /** A URI fragment specifying the type of skill.
*/ + odatatype: "#Microsoft.Skills.Custom.AmlSkill"; +} + +export function azureMachineLearningSkillSerializer(item: AzureMachineLearningSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + uri: item["scoringUri"], + key: item["authenticationKey"], + resourceId: item["resourceId"], + timeout: item["timeout"], + region: item["region"], + degreeOfParallelism: item["degreeOfParallelism"], + }; +} + +export function azureMachineLearningSkillDeserializer(item: any): AzureMachineLearningSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + scoringUri: item["uri"], + authenticationKey: item["key"], + resourceId: item["resourceId"], + timeout: item["timeout"], + region: item["region"], + degreeOfParallelism: item["degreeOfParallelism"], + }; +} + +/** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */ +export interface AzureOpenAIEmbeddingSkill extends SearchIndexerSkill { + /** The resource URI of the Azure OpenAI resource. */ + resourceUrl?: string; + /** ID of the Azure OpenAI model deployment on the designated resource. */ + deploymentName?: string; + /** API key of the designated Azure OpenAI resource. */ + apiKey?: string; + /** The user-assigned managed identity used for outbound connections. */ + authIdentity?: SearchIndexerDataIdentityUnion; + /** The name of the embedding model that is deployed at the provided deploymentId path. */ + modelName?: AzureOpenAIModelName; + /** The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. */ + dimensions?: number; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"; +} + +export function azureOpenAIEmbeddingSkillSerializer(item: AzureOpenAIEmbeddingSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + resourceUri: item["resourceUrl"], + deploymentId: item["deploymentName"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]), + modelName: item["modelName"], + dimensions: item["dimensions"], + }; +} + +export function azureOpenAIEmbeddingSkillDeserializer(item: any): AzureOpenAIEmbeddingSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + resourceUrl: item["resourceUri"], + deploymentName: item["deploymentId"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? 
item["authIdentity"] + : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]), + modelName: item["modelName"], + dimensions: item["dimensions"], + }; +} + +/** Allows you to generate a vector embedding for a given image or text input using the Azure AI Services Vision Vectorize API. */ +export interface VisionVectorizeSkill extends SearchIndexerSkill { + /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */ + modelVersion: string; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Vision.VectorizeSkill"; +} + +export function visionVectorizeSkillSerializer(item: VisionVectorizeSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + modelVersion: item["modelVersion"], + }; +} + +export function visionVectorizeSkillDeserializer(item: any): VisionVectorizeSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + modelVersion: item["modelVersion"], + }; +} + +/** A skill that leverages Azure AI Content Understanding to process and extract structured insights from documents, enabling enriched, searchable content for enhanced document indexing and retrieval. */ +export interface ContentUnderstandingSkill extends SearchIndexerSkill { + /** Controls the cardinality of the content extracted from the document by the skill. */ + extractionOptions?: ContentUnderstandingSkillExtractionOptions[]; + /** Controls the cardinality for chunking the content. */ + chunkingProperties?: ContentUnderstandingSkillChunkingProperties; + /** A URI fragment specifying the type of skill. */ + odataType: "#Microsoft.Skills.Util.ContentUnderstandingSkill"; +} + +export function contentUnderstandingSkillSerializer(item: ContentUnderstandingSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + extractionOptions: !item["extractionOptions"] + ? item["extractionOptions"] + : item["extractionOptions"].map((p: any) => { + return p; + }), + chunkingProperties: !item["chunkingProperties"] + ? item["chunkingProperties"] + : contentUnderstandingSkillChunkingPropertiesSerializer(item["chunkingProperties"]), + "@odata.type": item["odataType"], + }; +} + +export function contentUnderstandingSkillDeserializer(item: any): ContentUnderstandingSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + extractionOptions: !item["extractionOptions"] + ? item["extractionOptions"] + : item["extractionOptions"].map((p: any) => { + return p; + }), + chunkingProperties: !item["chunkingProperties"] + ? 
item["chunkingProperties"] + : contentUnderstandingSkillChunkingPropertiesDeserializer(item["chunkingProperties"]), + odataType: item["@odata.type"], + }; +} + +/** Controls the cardinality of the content extracted from the document by the skill. */ +export enum KnownContentUnderstandingSkillExtractionOptions { + /** Specify that image content should be extracted from the document. */ + Images = "images", + /** Specify that location metadata should be extracted from the document. */ + LocationMetadata = "locationMetadata", +} + +/** + * Controls the cardinality of the content extracted from the document by the skill. \ + * {@link KnownContentUnderstandingSkillExtractionOptions} can be used interchangeably with ContentUnderstandingSkillExtractionOptions, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **images**: Specify that image content should be extracted from the document. \ + * **locationMetadata**: Specify that location metadata should be extracted from the document. + */ +export type ContentUnderstandingSkillExtractionOptions = string; + +/** Controls the cardinality for chunking the content. */ +export interface ContentUnderstandingSkillChunkingProperties { + /** The unit of the chunk. */ + unit?: ContentUnderstandingSkillChunkingUnit; + /** The maximum chunk length in characters. Default is 500. */ + maximumLength?: number; + /** The length of overlap provided between two text chunks. Default is 0. */ + overlapLength?: number; +} + +export function contentUnderstandingSkillChunkingPropertiesSerializer( + item: ContentUnderstandingSkillChunkingProperties, +): any { + return { + unit: item["unit"], + maximumLength: item["maximumLength"], + overlapLength: item["overlapLength"], + }; +} + +export function contentUnderstandingSkillChunkingPropertiesDeserializer( + item: any, +): ContentUnderstandingSkillChunkingProperties { + return { + unit: item["unit"], + maximumLength: item["maximumLength"], + overlapLength: item["overlapLength"], + }; +} + +/** Controls the cardinality of the chunk unit. Default is 'characters' */ +export enum KnownContentUnderstandingSkillChunkingUnit { + /** Specifies chunk by characters. */ + Characters = "characters", +} + +/** + * Controls the cardinality of the chunk unit. Default is 'characters' \ + * {@link KnownContentUnderstandingSkillChunkingUnit} can be used interchangeably with ContentUnderstandingSkillChunkingUnit, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **characters**: Specifies chunk by characters. + */ +export type ContentUnderstandingSkillChunkingUnit = string; + +/** A skill that calls a language model via Azure AI Foundry's Chat Completions endpoint. */ +export interface ChatCompletionSkill extends SearchIndexerSkill { + /** The url for the Web API. */ + uri: string; + /** The headers required to make the http request. */ + httpHeaders?: WebApiHttpHeaders; + /** The method for the http request. */ + httpMethod?: string; + /** The desired timeout for the request. Default is 30 seconds. */ + timeout?: string; + /** The desired batch size which indicates number of documents. */ + batchSize?: number; + /** If set, the number of parallel calls that can be made to the Web API. */ + degreeOfParallelism?: number; + /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. 
+
+/** A skill that calls a language model via Azure AI Foundry's Chat Completions endpoint. */
+export interface ChatCompletionSkill extends SearchIndexerSkill {
+  /** The url for the Web API. */
+  uri: string;
+  /** The headers required to make the http request. */
+  httpHeaders?: WebApiHttpHeaders;
+  /** The method for the http request. */
+  httpMethod?: string;
+  /** The desired timeout for the request. Default is 30 seconds. */
+  timeout?: string;
+  /** The desired batch size which indicates number of documents. */
+  batchSize?: number;
+  /** If set, the number of parallel calls that can be made to the Web API. */
+  degreeOfParallelism?: number;
+  /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. */
+  authResourceId?: string;
+  /** The user-assigned managed identity used for outbound connections. */
+  authIdentity?: SearchIndexerDataIdentityUnion;
+  /** API key for authenticating to the model. apiKey and authIdentity cannot both be specified at the same time. */
+  apiKey?: string;
+  /** Common language model parameters that customers can tweak. If omitted, reasonable defaults will be applied. */
+  commonModelParameters?: CommonModelParameters;
+  /** Open-type dictionary for model-specific parameters that should be appended to the chat completions call. Follows Azure AI Foundry's extensibility pattern. */
+  extraParameters?: Record<string, any>;
+  /** How extra parameters are handled by Azure AI Foundry. Default is 'error'. */
+  extraParametersBehavior?: ChatCompletionExtraParametersBehavior;
+  /** Determines how the LLM should format its response. Defaults to 'text' response type. */
+  responseFormat?: ChatCompletionResponseFormat;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Custom.ChatCompletionSkill";
+}
+
+export function chatCompletionSkillSerializer(item: ChatCompletionSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    uri: item["uri"],
+    httpHeaders: !item["httpHeaders"]
+      ? item["httpHeaders"]
+      : webApiHttpHeadersSerializer(item["httpHeaders"]),
+    httpMethod: item["httpMethod"],
+    timeout: item["timeout"],
+    batchSize: item["batchSize"],
+    degreeOfParallelism: item["degreeOfParallelism"],
+    authResourceId: item["authResourceId"],
+    authIdentity: !item["authIdentity"]
+      ? item["authIdentity"]
+      : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]),
+    apiKey: item["apiKey"],
+    commonModelParameters: !item["commonModelParameters"]
+      ? item["commonModelParameters"]
+      : commonModelParametersSerializer(item["commonModelParameters"]),
+    extraParameters: item["extraParameters"],
+    extraParametersBehavior: item["extraParametersBehavior"],
+    responseFormat: !item["responseFormat"]
+      ? item["responseFormat"]
+      : chatCompletionResponseFormatSerializer(item["responseFormat"]),
+  };
+}
+
+export function chatCompletionSkillDeserializer(item: any): ChatCompletionSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    uri: item["uri"],
+    httpHeaders: !item["httpHeaders"]
+      ? item["httpHeaders"]
+      : webApiHttpHeadersDeserializer(item["httpHeaders"]),
+    httpMethod: item["httpMethod"],
+    timeout: item["timeout"],
+    batchSize: item["batchSize"],
+    degreeOfParallelism: item["degreeOfParallelism"],
+    authResourceId: item["authResourceId"],
+    authIdentity: !item["authIdentity"]
+      ? item["authIdentity"]
+      : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]),
+    apiKey: item["apiKey"],
+    commonModelParameters: !item["commonModelParameters"]
+      ? item["commonModelParameters"]
+      : commonModelParametersDeserializer(item["commonModelParameters"]),
+    extraParameters: item["extraParameters"],
+    extraParametersBehavior: item["extraParametersBehavior"],
+    responseFormat: !item["responseFormat"]
+      ? item["responseFormat"]
+      : chatCompletionResponseFormatDeserializer(item["responseFormat"]),
+  };
+}
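+
+// Editorial note: an illustrative sketch, not generated output. A
+// ChatCompletionSkill pointing at an Azure AI Foundry chat-completions
+// endpoint; the URL, key placeholder, and mapping names are hypothetical, and
+// the `inputs`/`outputs` shapes are assumed. Note that `apiKey` and
+// `authIdentity` are mutually exclusive, so only one is set here.
+//
+//   const skill: ChatCompletionSkill = {
+//     odatatype: "#Microsoft.Skills.Custom.ChatCompletionSkill",
+//     uri: "https://example-resource.services.ai.azure.com/models/chat/completions",
+//     apiKey: "<api-key>",
+//     commonModelParameters: { temperature: 0.2, maxTokens: 512 },
+//     extraParametersBehavior: "drop",
+//     context: "/document",
+//     inputs: [{ name: "userMessage", source: "/document/content" }],
+//     outputs: [{ name: "response", targetName: "summary" }],
+//   };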
item["responseFormat"] + : chatCompletionResponseFormatDeserializer(item["responseFormat"]), + odataType: item["@odata.type"], + }; +} + +/** A dictionary of http request headers. */ +export interface WebApiHttpHeaders { + /** Additional properties */ + additionalProperties?: Record; +} + +export function webApiHttpHeadersSerializer(item: WebApiHttpHeaders): any { + return { ...serializeRecord(item.additionalProperties ?? {}) }; +} + +export function webApiHttpHeadersDeserializer(item: any): WebApiHttpHeaders { + return { + additionalProperties: serializeRecord(item, []), + }; +} + +/** Common language model parameters for Chat Completions. If omitted, default values are used. */ +export interface CommonModelParameters { + /** The name of the model to use (e.g., 'gpt-4o', etc.). Default is null if not specified. */ + modelName?: string; + /** A float in the range [-2,2] that reduces or increases likelihood of repeated tokens. Default is 0. */ + frequencyPenalty?: number; + /** A float in the range [-2,2] that penalizes new tokens based on their existing presence. Default is 0. */ + presencePenalty?: number; + /** Maximum number of tokens to generate. */ + maxTokens?: number; + /** Sampling temperature. Default is 0.7. */ + temperature?: number; + /** Random seed for controlling deterministic outputs. If omitted, randomization is used. */ + seed?: number; + /** List of stop sequences that will cut off text generation. Default is none. */ + stop?: string[]; +} + +export function commonModelParametersSerializer(item: CommonModelParameters): any { + return { + model: item["modelName"], + frequencyPenalty: item["frequencyPenalty"], + presencePenalty: item["presencePenalty"], + maxTokens: item["maxTokens"], + temperature: item["temperature"], + seed: item["seed"], + stop: !item["stop"] + ? item["stop"] + : item["stop"].map((p: any) => { + return p; + }), + }; +} + +export function commonModelParametersDeserializer(item: any): CommonModelParameters { + return { + modelName: item["model"], + frequencyPenalty: item["frequencyPenalty"], + presencePenalty: item["presencePenalty"], + maxTokens: item["maxTokens"], + temperature: item["temperature"], + seed: item["seed"], + stop: !item["stop"] + ? item["stop"] + : item["stop"].map((p: any) => { + return p; + }), + }; +} + +/** Specifies how 'extraParameters' should be handled by Azure AI Foundry. Defaults to 'error'. */ +export enum KnownChatCompletionExtraParametersBehavior { + /** Passes any extra parameters directly to the model. */ + PassThrough = "passThrough", + /** Drops all extra parameters. */ + Drop = "drop", + /** Raises an error if any extra parameter is present. */ + Error = "error", +} + +/** + * Specifies how 'extraParameters' should be handled by Azure AI Foundry. Defaults to 'error'. \ + * {@link KnownChatCompletionExtraParametersBehavior} can be used interchangeably with ChatCompletionExtraParametersBehavior, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **passThrough**: Passes any extra parameters directly to the model. \ + * **drop**: Drops all extra parameters. \ + * **error**: Raises an error if any extra parameter is present. + */ +export type ChatCompletionExtraParametersBehavior = string; + +/** Determines how the language model's response should be serialized. Defaults to 'text'. */ +export interface ChatCompletionResponseFormat { + /** Specifies how the LLM should format the response. 
+
+/** Determines how the language model's response should be serialized. Defaults to 'text'. */
+export interface ChatCompletionResponseFormat {
+  /** Specifies how the LLM should format the response. */
+  type?: ChatCompletionResponseFormatType;
+  /** An open dictionary for extended properties. Required if 'type' is 'jsonSchema'. */
+  jsonSchemaProperties?: ChatCompletionSchemaProperties;
+}
+
+export function chatCompletionResponseFormatSerializer(item: ChatCompletionResponseFormat): any {
+  return {
+    type: item["type"],
+    jsonSchemaProperties: !item["jsonSchemaProperties"]
+      ? item["jsonSchemaProperties"]
+      : chatCompletionSchemaPropertiesSerializer(item["jsonSchemaProperties"]),
+  };
+}
+
+export function chatCompletionResponseFormatDeserializer(item: any): ChatCompletionResponseFormat {
+  return {
+    type: item["type"],
+    jsonSchemaProperties: !item["jsonSchemaProperties"]
+      ? item["jsonSchemaProperties"]
+      : chatCompletionSchemaPropertiesDeserializer(item["jsonSchemaProperties"]),
+  };
+}
+
+/** Specifies how the LLM should format the response. */
+export enum KnownChatCompletionResponseFormatType {
+  /** Plain text response format. */
+  Text = "text",
+  /** Arbitrary JSON object response format. */
+  JsonObject = "jsonObject",
+  /** JSON schema-adhering response format. */
+  JsonSchema = "jsonSchema",
+}
+
+/**
+ * Specifies how the LLM should format the response. \
+ * {@link KnownChatCompletionResponseFormatType} can be used interchangeably with ChatCompletionResponseFormatType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **text**: Plain text response format. \
+ * **jsonObject**: Arbitrary JSON object response format. \
+ * **jsonSchema**: JSON schema-adhering response format.
+ */
+export type ChatCompletionResponseFormatType = string;
+
+/** Properties for JSON schema response format. */
+export interface ChatCompletionSchemaProperties {
+  /** Name of the json schema the model will adhere to. */
+  name?: string;
+  /** Description of the json schema the model will adhere to. */
+  description?: string;
+  /** Whether or not the model's response should use structured outputs. Default is true. */
+  strict?: boolean;
+  /** The schema definition. */
+  schema?: ChatCompletionSchema;
+}
+
+export function chatCompletionSchemaPropertiesSerializer(
+  item: ChatCompletionSchemaProperties,
+): any {
+  return {
+    name: item["name"],
+    description: item["description"],
+    strict: item["strict"],
+    schema: !item["schema"] ? item["schema"] : chatCompletionSchemaSerializer(item["schema"]),
+  };
+}
+
+export function chatCompletionSchemaPropertiesDeserializer(
+  item: any,
+): ChatCompletionSchemaProperties {
+  return {
+    name: item["name"],
+    description: item["description"],
+    strict: item["strict"],
+    schema: !item["schema"] ? item["schema"] : chatCompletionSchemaDeserializer(item["schema"]),
+  };
+}
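+
+// Editorial note: an illustrative sketch, not generated output. A
+// structured-output response format; note that `ChatCompletionSchema.properties`
+// is a JSON-formatted string, not a nested object, hence the JSON.stringify.
+// All names and values below are hypothetical.
+//
+//   const responseFormat: ChatCompletionResponseFormat = {
+//     type: "jsonSchema",
+//     jsonSchemaProperties: {
+//       name: "summary_schema",
+//       strict: true,
+//       schema: {
+//         type: "object",
+//         properties: JSON.stringify({ summary: { type: "string" } }),
+//         required: ["summary"],
+//         additionalProperties: false,
+//       },
+//     },
+//   };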
+
+/** Object defining the custom schema the model will use to structure its output. */
+export interface ChatCompletionSchema {
+  /** Type of schema representation. Usually 'object'. Default is 'object'. */
+  type?: string;
+  /** A JSON-formatted string that defines the output schema's properties and constraints for the model. */
+  properties?: string;
+  /** An array of the property names that are required to be part of the model's response. */
+  required?: string[];
+  /** Controls whether it is allowable for an object to contain additional keys / values that were not defined in the JSON Schema. Default is false. */
+  additionalProperties?: boolean;
+}
+
+export function chatCompletionSchemaSerializer(item: ChatCompletionSchema): any {
+  return {
+    type: item["type"],
+    properties: item["properties"],
+    required: !item["required"]
+      ? item["required"]
+      : item["required"].map((p: any) => {
+          return p;
+        }),
+    additionalProperties: item["additionalProperties"],
+  };
+}
+
+export function chatCompletionSchemaDeserializer(item: any): ChatCompletionSchema {
+  return {
+    type: item["type"],
+    properties: item["properties"],
+    required: !item["required"]
+      ? item["required"]
+      : item["required"].map((p: any) => {
+          return p;
+        }),
+    additionalProperties: item["additionalProperties"],
+  };
+}
+
+/** Base type for describing any Azure AI service resource attached to a skillset. */
+export interface CognitiveServicesAccount {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.DefaultCognitiveServices, #Microsoft.Azure.Search.CognitiveServicesByKey, #Microsoft.Azure.Search.AIServicesByKey, #Microsoft.Azure.Search.AIServicesByIdentity */
+  odatatype: string;
+  /** Description of the Azure AI service resource attached to a skillset. */
+  description?: string;
+}
+
+export function cognitiveServicesAccountSerializer(item: CognitiveServicesAccount): any {
+  return { "@odata.type": item["odatatype"], description: item["description"] };
+}
+
+export function cognitiveServicesAccountDeserializer(item: any): CognitiveServicesAccount {
+  return {
+    odatatype: item["@odata.type"],
+    description: item["description"],
+  };
+}
+
+/** Alias for CognitiveServicesAccountUnion */
+export type CognitiveServicesAccountUnion =
+  | DefaultCognitiveServicesAccount
+  | CognitiveServicesAccountKey
+  | AIServicesAccountKey
+  | AIServicesAccountIdentity
+  | CognitiveServicesAccount;
+
+export function cognitiveServicesAccountUnionSerializer(item: CognitiveServicesAccountUnion): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.DefaultCognitiveServices":
+      return defaultCognitiveServicesAccountSerializer(item as DefaultCognitiveServicesAccount);
+
+    case "#Microsoft.Azure.Search.CognitiveServicesByKey":
+      return cognitiveServicesAccountKeySerializer(item as CognitiveServicesAccountKey);
+
+    case "#Microsoft.Azure.Search.AIServicesByKey":
+      return aiServicesAccountKeySerializer(item as AIServicesAccountKey);
+
+    case "#Microsoft.Azure.Search.AIServicesByIdentity":
+      return aiServicesAccountIdentitySerializer(item as AIServicesAccountIdentity);
+
+    default:
+      return cognitiveServicesAccountSerializer(item);
+  }
+}
+
+export function cognitiveServicesAccountUnionDeserializer(
+  item: any,
+): CognitiveServicesAccountUnion {
+  // The raw payload carries the discriminator under "@odata.type", so switch
+  // on the wire property rather than the deserialized "odatatype" field.
+  switch (item["@odata.type"]) {
+    case "#Microsoft.Azure.Search.DefaultCognitiveServices":
+      return defaultCognitiveServicesAccountDeserializer(item);
+
+    case "#Microsoft.Azure.Search.CognitiveServicesByKey":
+      return cognitiveServicesAccountKeyDeserializer(item);
+
+    case "#Microsoft.Azure.Search.AIServicesByKey":
+      return aiServicesAccountKeyDeserializer(item);
+
+    case "#Microsoft.Azure.Search.AIServicesByIdentity":
+      return aiServicesAccountIdentityDeserializer(item);
+
+    default:
+      return cognitiveServicesAccountDeserializer(item);
+  }
+}
+
+/** An empty object that represents the default Azure AI service resource for a skillset.
*/ +export interface DefaultCognitiveServicesAccount extends CognitiveServicesAccount { + /** A URI fragment specifying the type of Azure AI service resource attached to a skillset. */ + odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices"; +} + +export function defaultCognitiveServicesAccountSerializer( + item: DefaultCognitiveServicesAccount, +): any { + return { "@odata.type": item["odatatype"], description: item["description"] }; +} + +export function defaultCognitiveServicesAccountDeserializer( + item: any, +): DefaultCognitiveServicesAccount { + return { + odatatype: item["@odata.type"], + description: item["description"], + }; +} + +/** The multi-region account key of an Azure AI service resource that's attached to a skillset. */ +export interface CognitiveServicesAccountKey extends CognitiveServicesAccount { + /** The key used to provision the Azure AI service resource attached to a skillset. */ + key: string; + /** A URI fragment specifying the type of Azure AI service resource attached to a skillset. */ + odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey"; +} + +export function cognitiveServicesAccountKeySerializer(item: CognitiveServicesAccountKey): any { + return { + "@odata.type": item["odatatype"], + description: item["description"], + key: item["key"], + }; +} + +export function cognitiveServicesAccountKeyDeserializer(item: any): CognitiveServicesAccountKey { + return { + odatatype: item["@odata.type"], + description: item["description"], + key: item["key"], + }; +} + +/** The account key of an Azure AI service resource that's attached to a skillset, to be used with the resource's subdomain. */ +export interface AIServicesAccountKey extends CognitiveServicesAccount { + /** The key used to provision the Azure AI service resource attached to a skillset. */ + key: string; + /** The subdomain url for the corresponding AI Service. */ + subdomainUrl: string; + /** A URI fragment specifying the type of Azure AI service resource attached to a skillset. */ + odatatype: "#Microsoft.Azure.Search.AIServicesByKey"; +} + +export function aiServicesAccountKeySerializer(item: AIServicesAccountKey): any { + return { + "@odata.type": item["odatatype"], + description: item["description"], + key: item["key"], + subdomainUrl: item["subdomainUrl"], + }; +} + +export function aiServicesAccountKeyDeserializer(item: any): AIServicesAccountKey { + return { + odatatype: item["@odata.type"], + description: item["description"], + key: item["key"], + subdomainUrl: item["subdomainUrl"], + }; +} + +/** The multi-region account of an Azure AI service resource that's attached to a skillset. */ +export interface AIServicesAccountIdentity extends CognitiveServicesAccount { + /** The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + identity?: SearchIndexerDataIdentityUnion; + /** The subdomain url for the corresponding AI Service. */ + subdomainUrl: string; + /** A URI fragment specifying the type of Azure AI service resource attached to a skillset. */ + odatatype: "#Microsoft.Azure.Search.AIServicesByIdentity"; +} + +export function aiServicesAccountIdentitySerializer(item: AIServicesAccountIdentity): any { + return { + "@odata.type": item["odatatype"], + description: item["description"], + identity: !item["identity"] + ? 
item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + subdomainUrl: item["subdomainUrl"], + }; +} + +export function aiServicesAccountIdentityDeserializer(item: any): AIServicesAccountIdentity { + return { + odatatype: item["@odata.type"], + description: item["description"], + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + subdomainUrl: item["subdomainUrl"], + }; +} + +/** Definition of additional projections to azure blob, table, or files, of enriched data. */ +export interface SearchIndexerKnowledgeStore { + /** The connection string to the storage account projections will be stored in. */ + storageConnectionString: string; + /** A list of additional projections to perform during indexing. */ + projections: SearchIndexerKnowledgeStoreProjection[]; + /** The user-assigned managed identity used for connections to Azure Storage when writing knowledge store projections. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + identity?: SearchIndexerDataIdentityUnion; + /** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ + parameters?: SearchIndexerKnowledgeStoreParameters; +} + +export function searchIndexerKnowledgeStoreSerializer(item: SearchIndexerKnowledgeStore): any { + return { + storageConnectionString: item["storageConnectionString"], + projections: searchIndexerKnowledgeStoreProjectionArraySerializer(item["projections"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + parameters: !item["parameters"] + ? item["parameters"] + : searchIndexerKnowledgeStoreParametersSerializer(item["parameters"]), + }; +} + +export function searchIndexerKnowledgeStoreDeserializer(item: any): SearchIndexerKnowledgeStore { + return { + storageConnectionString: item["storageConnectionString"], + projections: searchIndexerKnowledgeStoreProjectionArrayDeserializer(item["projections"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + parameters: !item["parameters"] + ? item["parameters"] + : searchIndexerKnowledgeStoreParametersDeserializer(item["parameters"]), + }; +} + +export function searchIndexerKnowledgeStoreProjectionArraySerializer( + result: Array, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreProjectionSerializer(item); + }); +} + +export function searchIndexerKnowledgeStoreProjectionArrayDeserializer( + result: Array, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreProjectionDeserializer(item); + }); +} + +/** Container object for various projection selectors. */ +export interface SearchIndexerKnowledgeStoreProjection { + /** Projections to Azure Table storage. */ + tables?: SearchIndexerKnowledgeStoreTableProjectionSelector[]; + /** Projections to Azure Blob storage. */ + objects?: SearchIndexerKnowledgeStoreObjectProjectionSelector[]; + /** Projections to Azure File storage. 
+
+/** Container object for various projection selectors. */
+export interface SearchIndexerKnowledgeStoreProjection {
+  /** Projections to Azure Table storage. */
+  tables?: SearchIndexerKnowledgeStoreTableProjectionSelector[];
+  /** Projections to Azure Blob storage. */
+  objects?: SearchIndexerKnowledgeStoreObjectProjectionSelector[];
+  /** Projections to Azure File storage. */
+  files?: SearchIndexerKnowledgeStoreFileProjectionSelector[];
+}
+
+export function searchIndexerKnowledgeStoreProjectionSerializer(
+  item: SearchIndexerKnowledgeStoreProjection,
+): any {
+  return {
+    tables: !item["tables"]
+      ? item["tables"]
+      : searchIndexerKnowledgeStoreTableProjectionSelectorArraySerializer(item["tables"]),
+    objects: !item["objects"]
+      ? item["objects"]
+      : searchIndexerKnowledgeStoreObjectProjectionSelectorArraySerializer(item["objects"]),
+    files: !item["files"]
+      ? item["files"]
+      : searchIndexerKnowledgeStoreFileProjectionSelectorArraySerializer(item["files"]),
+  };
+}
+
+export function searchIndexerKnowledgeStoreProjectionDeserializer(
+  item: any,
+): SearchIndexerKnowledgeStoreProjection {
+  return {
+    tables: !item["tables"]
+      ? item["tables"]
+      : searchIndexerKnowledgeStoreTableProjectionSelectorArrayDeserializer(item["tables"]),
+    objects: !item["objects"]
+      ? item["objects"]
+      : searchIndexerKnowledgeStoreObjectProjectionSelectorArrayDeserializer(item["objects"]),
+    files: !item["files"]
+      ? item["files"]
+      : searchIndexerKnowledgeStoreFileProjectionSelectorArrayDeserializer(item["files"]),
+  };
+}
+
+export function searchIndexerKnowledgeStoreTableProjectionSelectorArraySerializer(
+  result: Array<SearchIndexerKnowledgeStoreTableProjectionSelector>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerKnowledgeStoreTableProjectionSelectorSerializer(item);
+  });
+}
+
+export function searchIndexerKnowledgeStoreTableProjectionSelectorArrayDeserializer(
+  result: Array<SearchIndexerKnowledgeStoreTableProjectionSelector>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerKnowledgeStoreTableProjectionSelectorDeserializer(item);
+  });
+}
+
+/** Description for what data to store in Azure Tables. */
+export interface SearchIndexerKnowledgeStoreTableProjectionSelector
+  extends SearchIndexerKnowledgeStoreProjectionSelector {
+  /** Name of the Azure table to store projected data in. */
+  tableName: string;
+}
+
+export function searchIndexerKnowledgeStoreTableProjectionSelectorSerializer(
+  item: SearchIndexerKnowledgeStoreTableProjectionSelector,
+): any {
+  return {
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArraySerializer(item["inputs"]),
+    tableName: item["tableName"],
+  };
+}
+
+export function searchIndexerKnowledgeStoreTableProjectionSelectorDeserializer(
+  item: any,
+): SearchIndexerKnowledgeStoreTableProjectionSelector {
+  return {
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    tableName: item["tableName"],
+  };
+}
+
+export function searchIndexerKnowledgeStoreObjectProjectionSelectorArraySerializer(
+  result: Array<SearchIndexerKnowledgeStoreObjectProjectionSelector>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerKnowledgeStoreObjectProjectionSelectorSerializer(item);
+  });
+}
+
+export function searchIndexerKnowledgeStoreObjectProjectionSelectorArrayDeserializer(
+  result: Array<SearchIndexerKnowledgeStoreObjectProjectionSelector>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerKnowledgeStoreObjectProjectionSelectorDeserializer(item);
+  });
+}
+
+/** Projection definition for what data to store in Azure Blob. */
+export interface SearchIndexerKnowledgeStoreObjectProjectionSelector
+  extends SearchIndexerKnowledgeStoreBlobProjectionSelector {}
+
+export function searchIndexerKnowledgeStoreObjectProjectionSelectorSerializer(
+  item: SearchIndexerKnowledgeStoreObjectProjectionSelector,
+): any {
+  return {
+    storageContainer: item["storageContainer"],
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArraySerializer(item["inputs"]),
+  };
+}
+
+export function searchIndexerKnowledgeStoreObjectProjectionSelectorDeserializer(
+  item: any,
+): SearchIndexerKnowledgeStoreObjectProjectionSelector {
+  return {
+    storageContainer: item["storageContainer"],
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+  };
+}
+
+export function searchIndexerKnowledgeStoreFileProjectionSelectorArraySerializer(
+  result: Array<SearchIndexerKnowledgeStoreFileProjectionSelector>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerKnowledgeStoreFileProjectionSelectorSerializer(item);
+  });
+}
+
+export function searchIndexerKnowledgeStoreFileProjectionSelectorArrayDeserializer(
+  result: Array<SearchIndexerKnowledgeStoreFileProjectionSelector>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerKnowledgeStoreFileProjectionSelectorDeserializer(item);
+  });
+}
+
+/** Projection definition for what data to store in Azure Files. */
+export interface SearchIndexerKnowledgeStoreFileProjectionSelector
+  extends SearchIndexerKnowledgeStoreBlobProjectionSelector {}
+
+export function searchIndexerKnowledgeStoreFileProjectionSelectorSerializer(
+  item: SearchIndexerKnowledgeStoreFileProjectionSelector,
+): any {
+  return {
+    storageContainer: item["storageContainer"],
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArraySerializer(item["inputs"]),
+  };
+}
+
+export function searchIndexerKnowledgeStoreFileProjectionSelectorDeserializer(
+  item: any,
+): SearchIndexerKnowledgeStoreFileProjectionSelector {
+  return {
+    storageContainer: item["storageContainer"],
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+  };
+}
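+
+// Editorial note: an illustrative sketch, not generated output. Object and
+// file projections differ from table projections only in targeting a blob
+// container; the container and source paths below are hypothetical.
+//
+//   const projection: SearchIndexerKnowledgeStoreProjection = {
+//     objects: [{ storageContainer: "enriched-json", source: "/document" }],
+//     files: [{ storageContainer: "extracted-images", source: "/document/normalized_images/*" }],
+//   };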
+
+/** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
+export interface SearchIndexerKnowledgeStoreParameters {
+  /** Whether or not projections should synthesize a generated key name if one isn't already present. */
+  synthesizeGeneratedKeyName?: boolean;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function searchIndexerKnowledgeStoreParametersSerializer(
+  item: SearchIndexerKnowledgeStoreParameters,
+): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    synthesizeGeneratedKeyName: item["synthesizeGeneratedKeyName"],
+  };
+}
+
+export function searchIndexerKnowledgeStoreParametersDeserializer(
+  item: any,
+): SearchIndexerKnowledgeStoreParameters {
+  return {
+    additionalProperties: serializeRecord(item, ["synthesizeGeneratedKeyName"]),
+    synthesizeGeneratedKeyName: item["synthesizeGeneratedKeyName"],
+  };
+}
+
+/** Definition of additional projections to secondary search indexes. */
+export interface SearchIndexerIndexProjection {
+  /** A list of projections to be performed to secondary search indexes. */
+  selectors: SearchIndexerIndexProjectionSelector[];
+  /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
+  parameters?: SearchIndexerIndexProjectionsParameters;
+}
+
+export function searchIndexerIndexProjectionSerializer(item: SearchIndexerIndexProjection): any {
+  return {
+    selectors: searchIndexerIndexProjectionSelectorArraySerializer(item["selectors"]),
+    parameters: !item["parameters"]
+      ? item["parameters"]
+      : searchIndexerIndexProjectionsParametersSerializer(item["parameters"]),
+  };
+}
+
+export function searchIndexerIndexProjectionDeserializer(item: any): SearchIndexerIndexProjection {
+  return {
+    selectors: searchIndexerIndexProjectionSelectorArrayDeserializer(item["selectors"]),
+    parameters: !item["parameters"]
+      ? item["parameters"]
+      : searchIndexerIndexProjectionsParametersDeserializer(item["parameters"]),
+  };
+}
+
+export function searchIndexerIndexProjectionSelectorArraySerializer(
+  result: Array<SearchIndexerIndexProjectionSelector>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerIndexProjectionSelectorSerializer(item);
+  });
+}
+
+export function searchIndexerIndexProjectionSelectorArrayDeserializer(
+  result: Array<SearchIndexerIndexProjectionSelector>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerIndexProjectionSelectorDeserializer(item);
+  });
+}
+
+/** Description for what data to store in the designated search index. */
+export interface SearchIndexerIndexProjectionSelector {
+  /** Name of the search index to project to. Must have a key field with the 'keyword' analyzer set. */
+  targetIndexName: string;
+  /** Name of the field in the search index to map the parent document's key value to. Must be a string field that is filterable and not the key field. */
+  parentKeyFieldName: string;
+  /** Source context for the projections. Represents the cardinality at which the document will be split into multiple sub documents. */
+  sourceContext: string;
+  /** Mappings for the projection, or which source should be mapped to which field in the target index. */
+  mappings: InputFieldMappingEntry[];
+}
+
+export function searchIndexerIndexProjectionSelectorSerializer(
+  item: SearchIndexerIndexProjectionSelector,
+): any {
+  return {
+    targetIndexName: item["targetIndexName"],
+    parentKeyFieldName: item["parentKeyFieldName"],
+    sourceContext: item["sourceContext"],
+    mappings: inputFieldMappingEntryArraySerializer(item["mappings"]),
+  };
+}
+
+export function searchIndexerIndexProjectionSelectorDeserializer(
+  item: any,
+): SearchIndexerIndexProjectionSelector {
+  return {
+    targetIndexName: item["targetIndexName"],
+    parentKeyFieldName: item["parentKeyFieldName"],
+    sourceContext: item["sourceContext"],
+    mappings: inputFieldMappingEntryArrayDeserializer(item["mappings"]),
+  };
+}
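+
+// Editorial note: an illustrative sketch, not generated output. An index
+// projection that writes each chunk under `/document/pages/*` to a secondary
+// index; the index, field, and path names are hypothetical, and the mapping
+// entries assume the usual InputFieldMappingEntry shape.
+//
+//   const selector: SearchIndexerIndexProjectionSelector = {
+//     targetIndexName: "chunked-index",
+//     parentKeyFieldName: "parentId",
+//     sourceContext: "/document/pages/*",
+//     mappings: [{ name: "chunk", source: "/document/pages/*" }],
+//   };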
+
+/** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
+export interface SearchIndexerIndexProjectionsParameters {
+  /** Defines behavior of the index projections in relation to the rest of the indexer. */
+  projectionMode?: IndexProjectionMode;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function searchIndexerIndexProjectionsParametersSerializer(
+  item: SearchIndexerIndexProjectionsParameters,
+): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    projectionMode: item["projectionMode"],
+  };
+}
+
+export function searchIndexerIndexProjectionsParametersDeserializer(
+  item: any,
+): SearchIndexerIndexProjectionsParameters {
+  return {
+    additionalProperties: serializeRecord(item, ["projectionMode"]),
+    projectionMode: item["projectionMode"],
+  };
+}
+
+/** Defines behavior of the index projections in relation to the rest of the indexer. */
+export enum KnownIndexProjectionMode {
+  /** The source document will be skipped from writing into the indexer's target index. */
+  SkipIndexingParentDocuments = "skipIndexingParentDocuments",
+  /** The source document will be written into the indexer's target index. This is the default pattern. */
+  IncludeIndexingParentDocuments = "includeIndexingParentDocuments",
+}
+
+/**
+ * Defines behavior of the index projections in relation to the rest of the indexer. \
+ * {@link KnownIndexProjectionMode} can be used interchangeably with IndexProjectionMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **skipIndexingParentDocuments**: The source document will be skipped from writing into the indexer's target index. \
+ * **includeIndexingParentDocuments**: The source document will be written into the indexer's target index. This is the default pattern.
+ */
+export type IndexProjectionMode = string;
+
+/** Abstract class to share properties between concrete selectors. */
+export interface SearchIndexerKnowledgeStoreProjectionSelector {
+  /** Name of reference key to different projection. */
+  referenceKeyName?: string;
+  /** Name of generated key to store projection under. */
+  generatedKeyName?: string;
+  /** Source data to project. */
+  source?: string;
+  /** Source context for complex projections. */
+  sourceContext?: string;
+  /** Nested inputs for complex projections. */
+  inputs?: InputFieldMappingEntry[];
+}
+
+export function searchIndexerKnowledgeStoreProjectionSelectorSerializer(
+  item: SearchIndexerKnowledgeStoreProjectionSelector,
+): any {
+  return {
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArraySerializer(item["inputs"]),
+  };
+}
+
+export function searchIndexerKnowledgeStoreProjectionSelectorDeserializer(
+  item: any,
+): SearchIndexerKnowledgeStoreProjectionSelector {
+  return {
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+  };
+}
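+
+// Editorial note: an illustrative sketch, not generated output. Pairing the
+// selector sketched earlier with parameters that skip the parent documents,
+// so only the projected chunks land in the target index:
+//
+//   const parameters: SearchIndexerIndexProjectionsParameters = {
+//     projectionMode: "skipIndexingParentDocuments",
+//   };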
+
+/** Abstract class to share properties between concrete selectors. */
+export interface SearchIndexerKnowledgeStoreBlobProjectionSelector
+  extends SearchIndexerKnowledgeStoreProjectionSelector {
+  /** Blob container to store projections in. */
+  storageContainer: string;
+}
+
+export function searchIndexerKnowledgeStoreBlobProjectionSelectorSerializer(
+  item: SearchIndexerKnowledgeStoreBlobProjectionSelector,
+): any {
+  return {
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArraySerializer(item["inputs"]),
+    storageContainer: item["storageContainer"],
+  };
+}
+
+export function searchIndexerKnowledgeStoreBlobProjectionSelectorDeserializer(
+  item: any,
+): SearchIndexerKnowledgeStoreBlobProjectionSelector {
+  return {
+    referenceKeyName: item["referenceKeyName"],
+    generatedKeyName: item["generatedKeyName"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    storageContainer: item["storageContainer"],
+  };
+}
+
+/** Response from a list skillset request. If successful, it includes the full definitions of all skillsets. */
+export interface ListSkillsetsResult {
+  /** The skillsets defined in the Search service. */
+  skillsets: SearchIndexerSkillset[];
+}
+
+export function listSkillsetsResultDeserializer(item: any): ListSkillsetsResult {
+  return {
+    skillsets: searchIndexerSkillsetArrayDeserializer(item["value"]),
+  };
+}
+
+export function searchIndexerSkillsetArraySerializer(result: Array<SearchIndexerSkillset>): any[] {
+  return result.map((item) => {
+    return searchIndexerSkillsetSerializer(item);
+  });
+}
+
+export function searchIndexerSkillsetArrayDeserializer(
+  result: Array<SearchIndexerSkillset>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerSkillsetDeserializer(item);
+  });
+}
+
+/** The type of the skill names. */
+export interface SkillNames {
+  /** The names of skills to be reset. */
+  skillNames?: string[];
+}
+
+export function skillNamesSerializer(item: SkillNames): any {
+  return {
+    skillNames: !item["skillNames"]
+      ? item["skillNames"]
+      : item["skillNames"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+/** Request body for resync indexer operation. */
+export interface IndexerResyncBody {
+  /** Indexer to re-ingest pre-selected permissions data from data source to index. */
+  options?: IndexerResyncOption[];
+}
+
+export function indexerResyncBodySerializer(item: IndexerResyncBody): any {
+  return {
+    options: !item["options"]
+      ? item["options"]
+      : item["options"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+/** Options with various types of permission data to index. */
+export enum KnownIndexerResyncOption {
+  /** Indexer to re-ingest pre-selected permissions data from data source to index. */
+  Permissions = "permissions",
+}
+
+/**
+ * Options with various types of permission data to index. \
+ * {@link KnownIndexerResyncOption} can be used interchangeably with IndexerResyncOption,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **permissions**: Indexer to re-ingest pre-selected permissions data from data source to index.
+ */ +export type IndexerResyncOption = string; diff --git a/sdk/search/search-documents/generated/models/azure/search/documents/knowledgeBase/index.ts b/sdk/search/search-documents/generated/models/azure/search/documents/knowledgeBase/index.ts new file mode 100644 index 000000000000..8e30a97720d9 --- /dev/null +++ b/sdk/search/search-documents/generated/models/azure/search/documents/knowledgeBase/index.ts @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { + KnowledgeRetrievalReasoningEffort, + KnowledgeRetrievalReasoningEffortUnion, + KnownKnowledgeRetrievalReasoningEffortKind, + KnowledgeRetrievalReasoningEffortKind, + KnowledgeRetrievalMinimalReasoningEffort, + KnowledgeRetrievalLowReasoningEffort, + KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalHighReasoningEffort, + KnownKnowledgeRetrievalOutputMode, + KnowledgeRetrievalOutputMode, + KnowledgeBaseRetrievalRequest, + KnowledgeBaseMessage, + KnowledgeBaseMessageContent, + KnowledgeBaseMessageContentUnion, + KnownKnowledgeBaseMessageContentType, + KnowledgeBaseMessageContentType, + KnowledgeBaseMessageTextContent, + KnowledgeBaseMessageImageContent, + KnowledgeBaseImageContent, + KnowledgeRetrievalIntent, + KnowledgeRetrievalIntentUnion, + KnownKnowledgeRetrievalIntentType, + KnowledgeRetrievalIntentType, + KnowledgeRetrievalSemanticIntent, + KnowledgeSourceParams, + KnowledgeSourceParamsUnion, + SearchIndexKnowledgeSourceParams, + AzureBlobKnowledgeSourceParams, + IndexedSharePointKnowledgeSourceParams, + IndexedOneLakeKnowledgeSourceParams, + WebKnowledgeSourceParams, + RemoteSharePointKnowledgeSourceParams, + KnowledgeBaseRetrievalResponse, + KnowledgeBaseActivityRecord, + KnowledgeBaseActivityRecordUnion, + KnowledgeBaseErrorDetail, + KnowledgeBaseErrorAdditionalInfo, + KnowledgeBaseModelQueryPlanningActivityRecord, + KnowledgeBaseModelAnswerSynthesisActivityRecord, + KnowledgeBaseAgenticReasoningActivityRecord, + KnowledgeBaseReference, + KnowledgeBaseReferenceUnion, + KnowledgeBaseSearchIndexReference, + KnowledgeBaseAzureBlobReference, + KnowledgeBaseIndexedSharePointReference, + KnowledgeBaseIndexedOneLakeReference, + KnowledgeBaseWebReference, + KnowledgeBaseRemoteSharePointReference, + SharePointSensitivityLabelInfo, +} from "./models.js"; diff --git a/sdk/search/search-documents/generated/models/azure/search/documents/knowledgeBase/models.ts b/sdk/search/search-documents/generated/models/azure/search/documents/knowledgeBase/models.ts new file mode 100644 index 000000000000..cf332775d037 --- /dev/null +++ b/sdk/search/search-documents/generated/models/azure/search/documents/knowledgeBase/models.ts @@ -0,0 +1,1185 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { KnowledgeBaseActivityRecordType, KnowledgeBaseReferenceType } from "../../../../models.js"; +import { KnowledgeSourceKind } from "../indexes/models.js"; + +/** + * This file contains only generated model types and their (de)serializers. + * Disable the following rules for internal models with '_' prefix and deserializers which require 'any' for raw JSON input. + */ +/* eslint-disable @typescript-eslint/naming-convention */ +/* eslint-disable @typescript-eslint/explicit-module-boundary-types */ +/** Base type for reasoning effort. */ +export interface KnowledgeRetrievalReasoningEffort { + /** The kind of reasoning effort. 
*/ + /** The discriminator possible values: minimal, low, medium, high */ + kind: KnowledgeRetrievalReasoningEffortKind; +} + +export function knowledgeRetrievalReasoningEffortSerializer( + item: KnowledgeRetrievalReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** Alias for KnowledgeRetrievalReasoningEffortUnion */ +export type KnowledgeRetrievalReasoningEffortUnion = + | KnowledgeRetrievalMinimalReasoningEffort + | KnowledgeRetrievalLowReasoningEffort + | KnowledgeRetrievalMediumReasoningEffort + | KnowledgeRetrievalHighReasoningEffort + | KnowledgeRetrievalReasoningEffort; + +export function knowledgeRetrievalReasoningEffortUnionSerializer( + item: KnowledgeRetrievalReasoningEffortUnion, +): any { + switch (item.kind) { + case "minimal": + return knowledgeRetrievalMinimalReasoningEffortSerializer( + item as KnowledgeRetrievalMinimalReasoningEffort, + ); + + case "low": + return knowledgeRetrievalLowReasoningEffortSerializer( + item as KnowledgeRetrievalLowReasoningEffort, + ); + + case "medium": + return knowledgeRetrievalMediumReasoningEffortSerializer( + item as KnowledgeRetrievalMediumReasoningEffort, + ); + + case "high": + return knowledgeRetrievalHighReasoningEffortSerializer( + item as KnowledgeRetrievalHighReasoningEffort, + ); + + default: + return knowledgeRetrievalReasoningEffortSerializer(item); + } +} + +export function knowledgeRetrievalReasoningEffortUnionDeserializer( + item: any, +): KnowledgeRetrievalReasoningEffortUnion { + switch (item.kind) { + case "minimal": + return knowledgeRetrievalMinimalReasoningEffortDeserializer( + item as KnowledgeRetrievalMinimalReasoningEffort, + ); + + case "low": + return knowledgeRetrievalLowReasoningEffortDeserializer( + item as KnowledgeRetrievalLowReasoningEffort, + ); + + case "medium": + return knowledgeRetrievalMediumReasoningEffortDeserializer( + item as KnowledgeRetrievalMediumReasoningEffort, + ); + + case "high": + return knowledgeRetrievalHighReasoningEffortDeserializer( + item as KnowledgeRetrievalHighReasoningEffort, + ); + + default: + return knowledgeRetrievalReasoningEffortDeserializer(item); + } +} + +/** The amount of effort to use during retrieval. */ +export enum KnownKnowledgeRetrievalReasoningEffortKind { + /** Does not perform any source selections, any query planning, or any iterative search. */ + Minimal = "minimal", + /** Use low reasoning during retrieval. */ + Low = "low", + /** Use a moderate amount of reasoning during retrieval. */ + Medium = "medium", + /** Use a high amount of reasoning during retrieval. */ + High = "high", +} + +/** + * The amount of effort to use during retrieval. \ + * {@link KnownKnowledgeRetrievalReasoningEffortKind} can be used interchangeably with KnowledgeRetrievalReasoningEffortKind, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **minimal**: Does not perform any source selections, any query planning, or any iterative search. \ + * **low**: Use low reasoning during retrieval. \ + * **medium**: Use a moderate amount of reasoning during retrieval. \ + * **high**: Use a high amount of reasoning during retrieval. + */ +export type KnowledgeRetrievalReasoningEffortKind = string; + +/** Run knowledge retrieval with minimal reasoning effort. 
*/ +export interface KnowledgeRetrievalMinimalReasoningEffort + extends KnowledgeRetrievalReasoningEffort { + /** The discriminator value. */ + kind: "minimal"; +} + +export function knowledgeRetrievalMinimalReasoningEffortSerializer( + item: KnowledgeRetrievalMinimalReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalMinimalReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalMinimalReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** Run knowledge retrieval with low reasoning effort. */ +export interface KnowledgeRetrievalLowReasoningEffort extends KnowledgeRetrievalReasoningEffort { + /** The discriminator value. */ + kind: "low"; +} + +export function knowledgeRetrievalLowReasoningEffortSerializer( + item: KnowledgeRetrievalLowReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalLowReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalLowReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** Run knowledge retrieval with medium reasoning effort. */ +export interface KnowledgeRetrievalMediumReasoningEffort extends KnowledgeRetrievalReasoningEffort { + /** The discriminator value. */ + kind: "medium"; +} + +export function knowledgeRetrievalMediumReasoningEffortSerializer( + item: KnowledgeRetrievalMediumReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalMediumReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalMediumReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** Run knowledge retrieval with high reasoning effort. */ +export interface KnowledgeRetrievalHighReasoningEffort extends KnowledgeRetrievalReasoningEffort { + /** The discriminator value. */ + kind: "high"; +} + +export function knowledgeRetrievalHighReasoningEffortSerializer( + item: KnowledgeRetrievalHighReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalHighReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalHighReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** The output configuration for this retrieval. */ +export enum KnownKnowledgeRetrievalOutputMode { + /** Return data from the knowledge sources directly without generative alteration. */ + ExtractiveData = "extractiveData", + /** Synthesize an answer for the response payload. */ + AnswerSynthesis = "answerSynthesis", +} + +/** + * The output configuration for this retrieval. \ + * {@link KnownKnowledgeRetrievalOutputMode} can be used interchangeably with KnowledgeRetrievalOutputMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **extractiveData**: Return data from the knowledge sources directly without generative alteration. \ + * **answerSynthesis**: Synthesize an answer for the response payload. + */ +export type KnowledgeRetrievalOutputMode = string; + +/** The input contract for the retrieval request. */ +export interface KnowledgeBaseRetrievalRequest { + /** A list of chat message style input. */ + messages?: KnowledgeBaseMessage[]; + /** A list of intended queries to execute without model query planning. */ + intents?: KnowledgeRetrievalIntentUnion[]; + /** The maximum runtime in seconds. */ + maxRuntimeInSeconds?: number; + /** Limits the maximum size of the content in the output. */ + maxOutputSize?: number; + /** The retrieval reasoning effort configuration. 
+   */
+  retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion;
+  /** Indicates retrieval results should include activity information. */
+  includeActivity?: boolean;
+  /** The output configuration for this retrieval. */
+  outputMode?: KnowledgeRetrievalOutputMode;
+  /** A list of runtime parameters for the knowledge sources. */
+  knowledgeSourceParams?: KnowledgeSourceParamsUnion[];
+}
+
+export function knowledgeBaseRetrievalRequestSerializer(item: KnowledgeBaseRetrievalRequest): any {
+  return {
+    messages: !item["messages"]
+      ? item["messages"]
+      : knowledgeBaseMessageArraySerializer(item["messages"]),
+    intents: !item["intents"]
+      ? item["intents"]
+      : knowledgeRetrievalIntentUnionArraySerializer(item["intents"]),
+    maxRuntimeInSeconds: item["maxRuntimeInSeconds"],
+    maxOutputSize: item["maxOutputSize"],
+    retrievalReasoningEffort: !item["retrievalReasoningEffort"]
+      ? item["retrievalReasoningEffort"]
+      : knowledgeRetrievalReasoningEffortUnionSerializer(item["retrievalReasoningEffort"]),
+    includeActivity: item["includeActivity"],
+    outputMode: item["outputMode"],
+    knowledgeSourceParams: !item["knowledgeSourceParams"]
+      ? item["knowledgeSourceParams"]
+      : knowledgeSourceParamsUnionArraySerializer(item["knowledgeSourceParams"]),
+  };
+}
+
+export function knowledgeBaseMessageArraySerializer(result: Array<KnowledgeBaseMessage>): any[] {
+  return result.map((item) => {
+    return knowledgeBaseMessageSerializer(item);
+  });
+}
+
+export function knowledgeBaseMessageArrayDeserializer(result: Array<KnowledgeBaseMessage>): any[] {
+  return result.map((item) => {
+    return knowledgeBaseMessageDeserializer(item);
+  });
+}
+
+/** The natural language message style object. */
+export interface KnowledgeBaseMessage {
+  /** The role of the message. */
+  role?: string;
+  /** The content of the message. */
+  content: KnowledgeBaseMessageContentUnion[];
+}
+
+export function knowledgeBaseMessageSerializer(item: KnowledgeBaseMessage): any {
+  return {
+    role: item["role"],
+    content: knowledgeBaseMessageContentUnionArraySerializer(item["content"]),
+  };
+}
+
+export function knowledgeBaseMessageDeserializer(item: any): KnowledgeBaseMessage {
+  return {
+    role: item["role"],
+    content: knowledgeBaseMessageContentUnionArrayDeserializer(item["content"]),
+  };
+}
+
+export function knowledgeBaseMessageContentUnionArraySerializer(
+  result: Array<KnowledgeBaseMessageContentUnion>,
+): any[] {
+  return result.map((item) => {
+    return knowledgeBaseMessageContentUnionSerializer(item);
+  });
+}
+
+export function knowledgeBaseMessageContentUnionArrayDeserializer(
+  result: Array<KnowledgeBaseMessageContentUnion>,
+): any[] {
+  return result.map((item) => {
+    return knowledgeBaseMessageContentUnionDeserializer(item);
+  });
+}
+
+/** Specifies the type of the message content.
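+ *
+ * Illustrative sketch (editor's note, not generated code): a message is a role plus an
+ * array of discriminated content parts; the sample text below is hypothetical:
+ *
+ * ```typescript
+ * const message: KnowledgeBaseMessage = {
+ *   role: "user",
+ *   content: [{ type: "text", text: "Which hotels allow pets?" }],
+ * };
+ * knowledgeBaseMessageSerializer(message); // wire shape: { role, content: [...] }
+ * ```
+ *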
*/ +export interface KnowledgeBaseMessageContent { + /** The type of the message */ + /** The discriminator possible values: text, image */ + type: KnowledgeBaseMessageContentType; +} + +export function knowledgeBaseMessageContentSerializer(item: KnowledgeBaseMessageContent): any { + return { type: item["type"] }; +} + +export function knowledgeBaseMessageContentDeserializer(item: any): KnowledgeBaseMessageContent { + return { + type: item["type"], + }; +} + +/** Alias for KnowledgeBaseMessageContentUnion */ +export type KnowledgeBaseMessageContentUnion = + | KnowledgeBaseMessageTextContent + | KnowledgeBaseMessageImageContent + | KnowledgeBaseMessageContent; + +export function knowledgeBaseMessageContentUnionSerializer( + item: KnowledgeBaseMessageContentUnion, +): any { + switch (item.type) { + case "text": + return knowledgeBaseMessageTextContentSerializer(item as KnowledgeBaseMessageTextContent); + + case "image": + return knowledgeBaseMessageImageContentSerializer(item as KnowledgeBaseMessageImageContent); + + default: + return knowledgeBaseMessageContentSerializer(item); + } +} + +export function knowledgeBaseMessageContentUnionDeserializer( + item: any, +): KnowledgeBaseMessageContentUnion { + switch (item.type) { + case "text": + return knowledgeBaseMessageTextContentDeserializer(item as KnowledgeBaseMessageTextContent); + + case "image": + return knowledgeBaseMessageImageContentDeserializer(item as KnowledgeBaseMessageImageContent); + + default: + return knowledgeBaseMessageContentDeserializer(item); + } +} + +/** The type of message content. */ +export enum KnownKnowledgeBaseMessageContentType { + /** Text message content kind. */ + Text = "text", + /** Image message content kind. */ + Image = "image", +} + +/** + * The type of message content. \ + * {@link KnownKnowledgeBaseMessageContentType} can be used interchangeably with KnowledgeBaseMessageContentType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **text**: Text message content kind. \ + * **image**: Image message content kind. + */ +export type KnowledgeBaseMessageContentType = string; + +/** Text message type. */ +export interface KnowledgeBaseMessageTextContent extends KnowledgeBaseMessageContent { + /** The discriminator value. */ + type: "text"; + /** The text content. */ + text: string; +} + +export function knowledgeBaseMessageTextContentSerializer( + item: KnowledgeBaseMessageTextContent, +): any { + return { type: item["type"], text: item["text"] }; +} + +export function knowledgeBaseMessageTextContentDeserializer( + item: any, +): KnowledgeBaseMessageTextContent { + return { + type: item["type"], + text: item["text"], + }; +} + +/** Image message type. */ +export interface KnowledgeBaseMessageImageContent extends KnowledgeBaseMessageContent { + /** The discriminator value. */ + type: "image"; + /** The image content. */ + image: KnowledgeBaseImageContent; +} + +export function knowledgeBaseMessageImageContentSerializer( + item: KnowledgeBaseMessageImageContent, +): any { + return { + type: item["type"], + image: knowledgeBaseImageContentSerializer(item["image"]), + }; +} + +export function knowledgeBaseMessageImageContentDeserializer( + item: any, +): KnowledgeBaseMessageImageContent { + return { + type: item["type"], + image: knowledgeBaseImageContentDeserializer(item["image"]), + }; +} + +/** Image content. */ +export interface KnowledgeBaseImageContent { + /** The url of the image. 
+   */
+  url: string;
+}
+
+export function knowledgeBaseImageContentSerializer(item: KnowledgeBaseImageContent): any {
+  return { url: item["url"] };
+}
+
+export function knowledgeBaseImageContentDeserializer(item: any): KnowledgeBaseImageContent {
+  return {
+    url: item["url"],
+  };
+}
+
+export function knowledgeRetrievalIntentUnionArraySerializer(
+  result: Array<KnowledgeRetrievalIntentUnion>,
+): any[] {
+  return result.map((item) => {
+    return knowledgeRetrievalIntentUnionSerializer(item);
+  });
+}
+
+/** An intended query to execute without model query planning. */
+export interface KnowledgeRetrievalIntent {
+  /** The type of the intent. */
+  /** The discriminator possible values: semantic */
+  type: KnowledgeRetrievalIntentType;
+}
+
+export function knowledgeRetrievalIntentSerializer(item: KnowledgeRetrievalIntent): any {
+  return { type: item["type"] };
+}
+
+/** Alias for KnowledgeRetrievalIntentUnion */
+export type KnowledgeRetrievalIntentUnion =
+  | KnowledgeRetrievalSemanticIntent
+  | KnowledgeRetrievalIntent;
+
+export function knowledgeRetrievalIntentUnionSerializer(item: KnowledgeRetrievalIntentUnion): any {
+  switch (item.type) {
+    case "semantic":
+      return knowledgeRetrievalSemanticIntentSerializer(item as KnowledgeRetrievalSemanticIntent);
+
+    default:
+      return knowledgeRetrievalIntentSerializer(item);
+  }
+}
+
+/** The kind of knowledge base configuration to use. */
+export enum KnownKnowledgeRetrievalIntentType {
+  /** A natural language semantic query intent. */
+  Semantic = "semantic",
+}
+
+/**
+ * The kind of knowledge base configuration to use. \
+ * {@link KnownKnowledgeRetrievalIntentType} can be used interchangeably with KnowledgeRetrievalIntentType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **semantic**: A natural language semantic query intent.
+ */
+export type KnowledgeRetrievalIntentType = string;
+
+/** A semantic query intent. */
+export interface KnowledgeRetrievalSemanticIntent extends KnowledgeRetrievalIntent {
+  /** The discriminator value. */
+  type: "semantic";
+  /** The semantic query to execute. */
+  search: string;
+}
+
+export function knowledgeRetrievalSemanticIntentSerializer(
+  item: KnowledgeRetrievalSemanticIntent,
+): any {
+  return { type: item["type"], search: item["search"] };
+}
+
+export function knowledgeSourceParamsUnionArraySerializer(
+  result: Array<KnowledgeSourceParamsUnion>,
+): any[] {
+  return result.map((item) => {
+    return knowledgeSourceParamsUnionSerializer(item);
+  });
+}
+
+/** Base type for knowledge source runtime parameters. */
+export interface KnowledgeSourceParams {
+  /** The name of the knowledge source the params apply to. */
+  knowledgeSourceName: string;
+  /** Indicates whether references should be included for data retrieved from this source. */
+  includeReferences?: boolean;
+  /** Indicates whether references should include the structured data obtained during retrieval in their payload. */
+  includeReferenceSourceData?: boolean;
+  /** Indicates that this knowledge source should bypass source selection and always be queried at retrieval time. */
+  alwaysQuerySource?: boolean;
+  /** The reranker threshold all retrieved documents must meet to be included in the response. */
+  rerankerThreshold?: number;
+  /** The type of the knowledge source.
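+   *
+   * Illustrative sketch (editor's note, not generated code): params target a source by
+   * name and narrow on this discriminator; the source name below is hypothetical:
+   *
+   * ```typescript
+   * const params: SearchIndexKnowledgeSourceParams = {
+   *   kind: "searchIndex",
+   *   knowledgeSourceName: "hotels-index",
+   *   filterAddOn: "State eq 'VA'",
+   * };
+   * ```
+   *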
+   */
+  /** The discriminator possible values: searchIndex, azureBlob, indexedSharePoint, indexedOneLake, web, remoteSharePoint */
+  kind: KnowledgeSourceKind;
+}
+
+export function knowledgeSourceParamsSerializer(item: KnowledgeSourceParams): any {
+  return {
+    knowledgeSourceName: item["knowledgeSourceName"],
+    includeReferences: item["includeReferences"],
+    includeReferenceSourceData: item["includeReferenceSourceData"],
+    alwaysQuerySource: item["alwaysQuerySource"],
+    rerankerThreshold: item["rerankerThreshold"],
+    kind: item["kind"],
+  };
+}
+
+/** Alias for KnowledgeSourceParamsUnion */
+export type KnowledgeSourceParamsUnion =
+  | SearchIndexKnowledgeSourceParams
+  | AzureBlobKnowledgeSourceParams
+  | IndexedSharePointKnowledgeSourceParams
+  | IndexedOneLakeKnowledgeSourceParams
+  | WebKnowledgeSourceParams
+  | RemoteSharePointKnowledgeSourceParams
+  | KnowledgeSourceParams;
+
+export function knowledgeSourceParamsUnionSerializer(item: KnowledgeSourceParamsUnion): any {
+  switch (item.kind) {
+    case "searchIndex":
+      return searchIndexKnowledgeSourceParamsSerializer(item as SearchIndexKnowledgeSourceParams);
+
+    case "azureBlob":
+      return azureBlobKnowledgeSourceParamsSerializer(item as AzureBlobKnowledgeSourceParams);
+
+    case "indexedSharePoint":
+      return indexedSharePointKnowledgeSourceParamsSerializer(
+        item as IndexedSharePointKnowledgeSourceParams,
+      );
+
+    case "indexedOneLake":
+      return indexedOneLakeKnowledgeSourceParamsSerializer(
+        item as IndexedOneLakeKnowledgeSourceParams,
+      );
+
+    case "web":
+      return webKnowledgeSourceParamsSerializer(item as WebKnowledgeSourceParams);
+
+    case "remoteSharePoint":
+      return remoteSharePointKnowledgeSourceParamsSerializer(
+        item as RemoteSharePointKnowledgeSourceParams,
+      );
+
+    default:
+      return knowledgeSourceParamsSerializer(item);
+  }
+}
+
+/** Specifies runtime parameters for a search index knowledge source. */
+export interface SearchIndexKnowledgeSourceParams extends KnowledgeSourceParams {
+  /** The discriminator value. */
+  kind: "searchIndex";
+  /** A filter condition applied to the index (e.g., 'State eq VA'). */
+  filterAddOn?: string;
+}
+
+export function searchIndexKnowledgeSourceParamsSerializer(
+  item: SearchIndexKnowledgeSourceParams,
+): any {
+  return {
+    knowledgeSourceName: item["knowledgeSourceName"],
+    includeReferences: item["includeReferences"],
+    includeReferenceSourceData: item["includeReferenceSourceData"],
+    alwaysQuerySource: item["alwaysQuerySource"],
+    rerankerThreshold: item["rerankerThreshold"],
+    kind: item["kind"],
+    filterAddOn: item["filterAddOn"],
+  };
+}
+
+/** Specifies runtime parameters for an Azure Blob knowledge source. */
+export interface AzureBlobKnowledgeSourceParams extends KnowledgeSourceParams {
+  /** The discriminator value. */
+  kind: "azureBlob";
+}
+
+export function azureBlobKnowledgeSourceParamsSerializer(
+  item: AzureBlobKnowledgeSourceParams,
+): any {
+  return {
+    knowledgeSourceName: item["knowledgeSourceName"],
+    includeReferences: item["includeReferences"],
+    includeReferenceSourceData: item["includeReferenceSourceData"],
+    alwaysQuerySource: item["alwaysQuerySource"],
+    rerankerThreshold: item["rerankerThreshold"],
+    kind: item["kind"],
+  };
+}
+
+/** Specifies runtime parameters for an indexed SharePoint knowledge source. */
+export interface IndexedSharePointKnowledgeSourceParams extends KnowledgeSourceParams {
+  /** The discriminator value.
+   */
+  kind: "indexedSharePoint";
+}
+
+export function indexedSharePointKnowledgeSourceParamsSerializer(
+  item: IndexedSharePointKnowledgeSourceParams,
+): any {
+  return {
+    knowledgeSourceName: item["knowledgeSourceName"],
+    includeReferences: item["includeReferences"],
+    includeReferenceSourceData: item["includeReferenceSourceData"],
+    alwaysQuerySource: item["alwaysQuerySource"],
+    rerankerThreshold: item["rerankerThreshold"],
+    kind: item["kind"],
+  };
+}
+
+/** Specifies runtime parameters for an indexed OneLake knowledge source. */
+export interface IndexedOneLakeKnowledgeSourceParams extends KnowledgeSourceParams {
+  /** The discriminator value. */
+  kind: "indexedOneLake";
+}
+
+export function indexedOneLakeKnowledgeSourceParamsSerializer(
+  item: IndexedOneLakeKnowledgeSourceParams,
+): any {
+  return {
+    knowledgeSourceName: item["knowledgeSourceName"],
+    includeReferences: item["includeReferences"],
+    includeReferenceSourceData: item["includeReferenceSourceData"],
+    alwaysQuerySource: item["alwaysQuerySource"],
+    rerankerThreshold: item["rerankerThreshold"],
+    kind: item["kind"],
+  };
+}
+
+/** Specifies runtime parameters for a web knowledge source. */
+export interface WebKnowledgeSourceParams extends KnowledgeSourceParams {
+  /** The discriminator value. */
+  kind: "web";
+  /** The language of the web results. */
+  language?: string;
+  /** The market of the web results. */
+  market?: string;
+  /** The number of web results to return. */
+  count?: number;
+  /** The freshness of web results. */
+  freshness?: string;
+}
+
+export function webKnowledgeSourceParamsSerializer(item: WebKnowledgeSourceParams): any {
+  return {
+    knowledgeSourceName: item["knowledgeSourceName"],
+    includeReferences: item["includeReferences"],
+    includeReferenceSourceData: item["includeReferenceSourceData"],
+    alwaysQuerySource: item["alwaysQuerySource"],
+    rerankerThreshold: item["rerankerThreshold"],
+    kind: item["kind"],
+    language: item["language"],
+    market: item["market"],
+    count: item["count"],
+    freshness: item["freshness"],
+  };
+}
+
+/** Specifies runtime parameters for a remote SharePoint knowledge source. */
+export interface RemoteSharePointKnowledgeSourceParams extends KnowledgeSourceParams {
+  /** The discriminator value. */
+  kind: "remoteSharePoint";
+  /** Keyword Query Language (KQL) expression with queryable SharePoint properties and attributes to scope the retrieval before the query runs. See documentation: https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference */
+  filterExpression?: string;
+  /** A list of metadata fields to be returned for each item in the response. Only retrievable metadata properties can be included in this list. By default, no metadata is returned. Optional. */
+  resourceMetadata?: string[];
+  /** Container ID for SharePoint Embedded connection. When this is null, it will use SharePoint Online. */
+  containerTypeId?: string;
+}
+
+export function remoteSharePointKnowledgeSourceParamsSerializer(
+  item: RemoteSharePointKnowledgeSourceParams,
+): any {
+  return {
+    knowledgeSourceName: item["knowledgeSourceName"],
+    includeReferences: item["includeReferences"],
+    includeReferenceSourceData: item["includeReferenceSourceData"],
+    alwaysQuerySource: item["alwaysQuerySource"],
+    rerankerThreshold: item["rerankerThreshold"],
+    kind: item["kind"],
+    filterExpression: item["filterExpression"],
+    resourceMetadata: !item["resourceMetadata"]
+      ?
item["resourceMetadata"] + : item["resourceMetadata"].map((p: any) => { + return p; + }), + containerTypeId: item["containerTypeId"], + }; +} + +/** The output contract for the retrieval response. */ +export interface KnowledgeBaseRetrievalResponse { + /** The response messages. */ + response?: KnowledgeBaseMessage[]; + /** The activity records for tracking progress and billing implications. */ + activity?: KnowledgeBaseActivityRecordUnion[]; + /** The references for the retrieval data used in the response. */ + references?: KnowledgeBaseReferenceUnion[]; +} + +export function knowledgeBaseRetrievalResponseDeserializer( + item: any, +): KnowledgeBaseRetrievalResponse { + return { + response: !item["response"] + ? item["response"] + : knowledgeBaseMessageArrayDeserializer(item["response"]), + activity: !item["activity"] + ? item["activity"] + : knowledgeBaseActivityRecordUnionArrayDeserializer(item["activity"]), + references: !item["references"] + ? item["references"] + : knowledgeBaseReferenceUnionArrayDeserializer(item["references"]), + }; +} + +export function knowledgeBaseActivityRecordUnionArrayDeserializer( + result: Array, +): any[] { + return result.map((item) => { + return knowledgeBaseActivityRecordUnionDeserializer(item); + }); +} + +/** Base type for activity records. Tracks execution details, timing, and errors for knowledge base operations. */ +export interface KnowledgeBaseActivityRecord { + /** The ID of the activity record. */ + id: number; + /** The type of the activity record. */ + /** The discriminator possible values: modelQueryPlanning, modelAnswerSynthesis, agenticReasoning */ + type: KnowledgeBaseActivityRecordType; + /** The elapsed time in milliseconds for the retrieval activity. */ + elapsedMs?: number; + /** The error detail explaining why the operation failed. This property is only included when the activity does not succeed. */ + error?: KnowledgeBaseErrorDetail; +} + +export function knowledgeBaseActivityRecordDeserializer(item: any): KnowledgeBaseActivityRecord { + return { + id: item["id"], + type: item["type"], + elapsedMs: item["elapsedMs"], + error: !item["error"] ? item["error"] : knowledgeBaseErrorDetailDeserializer(item["error"]), + }; +} + +/** Alias for KnowledgeBaseActivityRecordUnion */ +export type KnowledgeBaseActivityRecordUnion = + | KnowledgeBaseModelQueryPlanningActivityRecord + | KnowledgeBaseModelAnswerSynthesisActivityRecord + | KnowledgeBaseAgenticReasoningActivityRecord + | KnowledgeBaseActivityRecord; + +export function knowledgeBaseActivityRecordUnionDeserializer( + item: any, +): KnowledgeBaseActivityRecordUnion { + switch (item.type) { + case "modelQueryPlanning": + return knowledgeBaseModelQueryPlanningActivityRecordDeserializer( + item as KnowledgeBaseModelQueryPlanningActivityRecord, + ); + + case "modelAnswerSynthesis": + return knowledgeBaseModelAnswerSynthesisActivityRecordDeserializer( + item as KnowledgeBaseModelAnswerSynthesisActivityRecord, + ); + + case "agenticReasoning": + return knowledgeBaseAgenticReasoningActivityRecordDeserializer( + item as KnowledgeBaseAgenticReasoningActivityRecord, + ); + + default: + return knowledgeBaseActivityRecordDeserializer(item); + } +} + +/** The error details. */ +export interface KnowledgeBaseErrorDetail { + /** The error code. */ + code?: string; + /** The error message. */ + message?: string; + /** The error target. */ + target?: string; + /** The error details. */ + details?: KnowledgeBaseErrorDetail[]; + /** The error additional info. 
+   */
+  additionalInfo?: KnowledgeBaseErrorAdditionalInfo[];
+}
+
+export function knowledgeBaseErrorDetailDeserializer(item: any): KnowledgeBaseErrorDetail {
+  return {
+    code: item["code"],
+    message: item["message"],
+    target: item["target"],
+    details: !item["details"]
+      ? item["details"]
+      : knowledgeBaseErrorDetailArrayDeserializer(item["details"]),
+    additionalInfo: !item["additionalInfo"]
+      ? item["additionalInfo"]
+      : knowledgeBaseErrorAdditionalInfoArrayDeserializer(item["additionalInfo"]),
+  };
+}
+
+export function knowledgeBaseErrorDetailArrayDeserializer(
+  result: Array<KnowledgeBaseErrorDetail>,
+): any[] {
+  return result.map((item) => {
+    return knowledgeBaseErrorDetailDeserializer(item);
+  });
+}
+
+export function knowledgeBaseErrorAdditionalInfoArrayDeserializer(
+  result: Array<KnowledgeBaseErrorAdditionalInfo>,
+): any[] {
+  return result.map((item) => {
+    return knowledgeBaseErrorAdditionalInfoDeserializer(item);
+  });
+}
+
+/** The resource management error additional info. */
+export interface KnowledgeBaseErrorAdditionalInfo {
+  /** The additional info type. */
+  type?: string;
+  /** The additional info. */
+  info?: Record<string, any>;
+}
+
+export function knowledgeBaseErrorAdditionalInfoDeserializer(
+  item: any,
+): KnowledgeBaseErrorAdditionalInfo {
+  return {
+    type: item["type"],
+    info: item["info"],
+  };
+}
+
+/** Represents an LLM query planning activity record. */
+export interface KnowledgeBaseModelQueryPlanningActivityRecord extends KnowledgeBaseActivityRecord {
+  /** The discriminator value. */
+  type: "modelQueryPlanning";
+  /** The number of input tokens for the LLM query planning activity. */
+  inputTokens?: number;
+  /** The number of output tokens for the LLM query planning activity. */
+  outputTokens?: number;
+}
+
+export function knowledgeBaseModelQueryPlanningActivityRecordDeserializer(
+  item: any,
+): KnowledgeBaseModelQueryPlanningActivityRecord {
+  return {
+    id: item["id"],
+    type: item["type"],
+    elapsedMs: item["elapsedMs"],
+    error: !item["error"] ? item["error"] : knowledgeBaseErrorDetailDeserializer(item["error"]),
+    inputTokens: item["inputTokens"],
+    outputTokens: item["outputTokens"],
+  };
+}
+
+/** Represents an LLM answer synthesis activity record. */
+export interface KnowledgeBaseModelAnswerSynthesisActivityRecord
+  extends KnowledgeBaseActivityRecord {
+  /** The discriminator value. */
+  type: "modelAnswerSynthesis";
+  /** The number of input tokens for the LLM answer synthesis activity. */
+  inputTokens?: number;
+  /** The number of output tokens for the LLM answer synthesis activity. */
+  outputTokens?: number;
+}
+
+export function knowledgeBaseModelAnswerSynthesisActivityRecordDeserializer(
+  item: any,
+): KnowledgeBaseModelAnswerSynthesisActivityRecord {
+  return {
+    id: item["id"],
+    type: item["type"],
+    elapsedMs: item["elapsedMs"],
+    error: !item["error"] ? item["error"] : knowledgeBaseErrorDetailDeserializer(item["error"]),
+    inputTokens: item["inputTokens"],
+    outputTokens: item["outputTokens"],
+  };
+}
+
+/** Represents an agentic reasoning activity record. */
+export interface KnowledgeBaseAgenticReasoningActivityRecord extends KnowledgeBaseActivityRecord {
+  /** The discriminator value. */
+  type: "agenticReasoning";
+  /** The number of reasoning tokens used for agentic reasoning. */
+  reasoningTokens?: number;
+  /** The retrieval reasoning effort configuration.
+   */
+  retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion;
+}
+
+export function knowledgeBaseAgenticReasoningActivityRecordDeserializer(
+  item: any,
+): KnowledgeBaseAgenticReasoningActivityRecord {
+  return {
+    id: item["id"],
+    type: item["type"],
+    elapsedMs: item["elapsedMs"],
+    error: !item["error"] ? item["error"] : knowledgeBaseErrorDetailDeserializer(item["error"]),
+    reasoningTokens: item["reasoningTokens"],
+    retrievalReasoningEffort: !item["retrievalReasoningEffort"]
+      ? item["retrievalReasoningEffort"]
+      : knowledgeRetrievalReasoningEffortUnionDeserializer(item["retrievalReasoningEffort"]),
+  };
+}
+
+export function knowledgeBaseReferenceUnionArrayDeserializer(
+  result: Array<KnowledgeBaseReferenceUnion>,
+): any[] {
+  return result.map((item) => {
+    return knowledgeBaseReferenceUnionDeserializer(item);
+  });
+}
+
+/** Base type for references. */
+export interface KnowledgeBaseReference {
+  /** The type of the reference. */
+  /** The discriminator possible values: searchIndex, azureBlob, indexedSharePoint, indexedOneLake, web, remoteSharePoint */
+  type: KnowledgeBaseReferenceType;
+  /** The ID of the reference. */
+  id: string;
+  /** The source activity ID for the reference. */
+  activitySource: number;
+  /** The source data for the reference. */
+  sourceData?: Record<string, any>;
+  /** The reranker score for the document reference. */
+  rerankerScore?: number;
+}
+
+export function knowledgeBaseReferenceDeserializer(item: any): KnowledgeBaseReference {
+  return {
+    type: item["type"],
+    id: item["id"],
+    activitySource: item["activitySource"],
+    sourceData: item["sourceData"],
+    rerankerScore: item["rerankerScore"],
+  };
+}
+
+/** Alias for KnowledgeBaseReferenceUnion */
+export type KnowledgeBaseReferenceUnion =
+  | KnowledgeBaseSearchIndexReference
+  | KnowledgeBaseAzureBlobReference
+  | KnowledgeBaseIndexedSharePointReference
+  | KnowledgeBaseIndexedOneLakeReference
+  | KnowledgeBaseWebReference
+  | KnowledgeBaseRemoteSharePointReference
+  | KnowledgeBaseReference;
+
+export function knowledgeBaseReferenceUnionDeserializer(item: any): KnowledgeBaseReferenceUnion {
+  switch (item.type) {
+    case "searchIndex":
+      return knowledgeBaseSearchIndexReferenceDeserializer(
+        item as KnowledgeBaseSearchIndexReference,
+      );
+
+    case "azureBlob":
+      return knowledgeBaseAzureBlobReferenceDeserializer(item as KnowledgeBaseAzureBlobReference);
+
+    case "indexedSharePoint":
+      return knowledgeBaseIndexedSharePointReferenceDeserializer(
+        item as KnowledgeBaseIndexedSharePointReference,
+      );
+
+    case "indexedOneLake":
+      return knowledgeBaseIndexedOneLakeReferenceDeserializer(
+        item as KnowledgeBaseIndexedOneLakeReference,
+      );
+
+    case "web":
+      return knowledgeBaseWebReferenceDeserializer(item as KnowledgeBaseWebReference);
+
+    case "remoteSharePoint":
+      return knowledgeBaseRemoteSharePointReferenceDeserializer(
+        item as KnowledgeBaseRemoteSharePointReference,
+      );
+
+    default:
+      return knowledgeBaseReferenceDeserializer(item);
+  }
+}
+
+/** Represents an Azure Search document reference. */
+export interface KnowledgeBaseSearchIndexReference extends KnowledgeBaseReference {
+  /** The discriminator value. */
+  type: "searchIndex";
+  /** The document key for the reference.
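+   *
+   * Illustrative sketch (editor's note, not generated code): deserialized references can
+   * be narrowed by `type` to reach source-specific fields, mirroring the casts used by
+   * the union deserializer above:
+   *
+   * ```typescript
+   * function citationUrl(ref: KnowledgeBaseReferenceUnion): string | undefined {
+   *   switch (ref.type) {
+   *     case "web":
+   *       return (ref as KnowledgeBaseWebReference).url;
+   *     case "azureBlob":
+   *       return (ref as KnowledgeBaseAzureBlobReference).blobUrl;
+   *     default:
+   *       return undefined;
+   *   }
+   * }
+   * ```
+   *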
*/ + docKey?: string; +} + +export function knowledgeBaseSearchIndexReferenceDeserializer( + item: any, +): KnowledgeBaseSearchIndexReference { + return { + type: item["type"], + id: item["id"], + activitySource: item["activitySource"], + sourceData: item["sourceData"], + rerankerScore: item["rerankerScore"], + docKey: item["docKey"], + }; +} + +/** Represents an Azure Blob Storage document reference. */ +export interface KnowledgeBaseAzureBlobReference extends KnowledgeBaseReference { + /** The discriminator value. */ + type: "azureBlob"; + /** The blob URL for the reference. */ + blobUrl?: string; +} + +export function knowledgeBaseAzureBlobReferenceDeserializer( + item: any, +): KnowledgeBaseAzureBlobReference { + return { + type: item["type"], + id: item["id"], + activitySource: item["activitySource"], + sourceData: item["sourceData"], + rerankerScore: item["rerankerScore"], + blobUrl: item["blobUrl"], + }; +} + +/** Represents an indexed SharePoint document reference. */ +export interface KnowledgeBaseIndexedSharePointReference extends KnowledgeBaseReference { + /** The discriminator value. */ + type: "indexedSharePoint"; + /** The document URL for the reference. */ + docUrl?: string; +} + +export function knowledgeBaseIndexedSharePointReferenceDeserializer( + item: any, +): KnowledgeBaseIndexedSharePointReference { + return { + type: item["type"], + id: item["id"], + activitySource: item["activitySource"], + sourceData: item["sourceData"], + rerankerScore: item["rerankerScore"], + docUrl: item["docUrl"], + }; +} + +/** Represents an indexed OneLake document reference. */ +export interface KnowledgeBaseIndexedOneLakeReference extends KnowledgeBaseReference { + /** The discriminator value. */ + type: "indexedOneLake"; + /** The document URL for the reference. */ + docUrl?: string; +} + +export function knowledgeBaseIndexedOneLakeReferenceDeserializer( + item: any, +): KnowledgeBaseIndexedOneLakeReference { + return { + type: item["type"], + id: item["id"], + activitySource: item["activitySource"], + sourceData: item["sourceData"], + rerankerScore: item["rerankerScore"], + docUrl: item["docUrl"], + }; +} + +/** Represents a web document reference. */ +export interface KnowledgeBaseWebReference extends KnowledgeBaseReference { + /** The discriminator value. */ + type: "web"; + /** The url the reference data originated from. */ + url: string; + /** The title of the web document. */ + title?: string; +} + +export function knowledgeBaseWebReferenceDeserializer(item: any): KnowledgeBaseWebReference { + return { + type: item["type"], + id: item["id"], + activitySource: item["activitySource"], + sourceData: item["sourceData"], + rerankerScore: item["rerankerScore"], + url: item["url"], + title: item["title"], + }; +} + +/** Represents a remote SharePoint document reference. */ +export interface KnowledgeBaseRemoteSharePointReference extends KnowledgeBaseReference { + /** The discriminator value. */ + type: "remoteSharePoint"; + /** The url the reference data originated from. */ + webUrl: string; + /** Information about the sensitivity label applied to the SharePoint document. 
*/ + searchSensitivityLabelInfo?: SharePointSensitivityLabelInfo; +} + +export function knowledgeBaseRemoteSharePointReferenceDeserializer( + item: any, +): KnowledgeBaseRemoteSharePointReference { + return { + type: item["type"], + id: item["id"], + activitySource: item["activitySource"], + sourceData: item["sourceData"], + rerankerScore: item["rerankerScore"], + webUrl: item["webUrl"], + searchSensitivityLabelInfo: !item["searchSensitivityLabelInfo"] + ? item["searchSensitivityLabelInfo"] + : sharePointSensitivityLabelInfoDeserializer(item["searchSensitivityLabelInfo"]), + }; +} + +/** Information about the sensitivity label applied to a SharePoint document. */ +export interface SharePointSensitivityLabelInfo { + /** The display name for the sensitivity label. */ + displayName?: string; + /** The ID of the sensitivity label. */ + sensitivityLabelId?: string; + /** The tooltip that should be displayed for the label in a UI. */ + tooltip?: string; + /** The priority in which the sensitivity label is applied. */ + priority?: number; + /** The color that the UI should display for the label, if configured. */ + color?: string; + /** Indicates whether the sensitivity label enforces encryption. */ + isEncrypted?: boolean; +} + +export function sharePointSensitivityLabelInfoDeserializer( + item: any, +): SharePointSensitivityLabelInfo { + return { + displayName: item["displayName"], + sensitivityLabelId: item["sensitivityLabelId"], + tooltip: item["tooltip"], + priority: item["priority"], + color: item["color"], + isEncrypted: item["isEncrypted"], + }; +} diff --git a/sdk/search/search-documents/generated/models/azure/search/documents/models.ts b/sdk/search/search-documents/generated/models/azure/search/documents/models.ts new file mode 100644 index 000000000000..c62a04e178a9 --- /dev/null +++ b/sdk/search/search-documents/generated/models/azure/search/documents/models.ts @@ -0,0 +1,1935 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { serializeRecord } from "../../../../static-helpers/serialization/serialize-record.js"; + +/** + * This file contains only generated model types and their (de)serializers. + * Disable the following rules for internal models with '_' prefix and deserializers which require 'any' for raw JSON input. + */ +/* eslint-disable @typescript-eslint/naming-convention */ +/* eslint-disable @typescript-eslint/explicit-module-boundary-types */ +/** Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). */ +export interface ErrorResponse { + /** The error object. */ + error?: ErrorDetail; +} + +export function errorResponseDeserializer(item: any): ErrorResponse { + return { + error: !item["error"] ? item["error"] : errorDetailDeserializer(item["error"]), + }; +} + +/** The error detail. */ +export interface ErrorDetail { + /** The error code. */ + code?: string; + /** The error message. */ + message?: string; + /** The error target. */ + target?: string; + /** The error details. */ + details?: ErrorDetail[]; + /** The error additional info. */ + additionalInfo?: ErrorAdditionalInfo[]; +} + +export function errorDetailDeserializer(item: any): ErrorDetail { + return { + code: item["code"], + message: item["message"], + target: item["target"], + details: !item["details"] ? item["details"] : errorDetailArrayDeserializer(item["details"]), + additionalInfo: !item["additionalInfo"] + ? 
item["additionalInfo"] + : errorAdditionalInfoArrayDeserializer(item["additionalInfo"]), + }; +} + +export function errorDetailArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return errorDetailDeserializer(item); + }); +} + +export function errorAdditionalInfoArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return errorAdditionalInfoDeserializer(item); + }); +} + +/** The resource management error additional info. */ +export interface ErrorAdditionalInfo { + /** The additional info type. */ + type?: string; + /** The additional info. */ + info?: Record; +} + +export function errorAdditionalInfoDeserializer(item: any): ErrorAdditionalInfo { + return { + type: item["type"], + info: item["info"], + }; +} + +/** Response containing search results from an index. */ +export interface SearchDocumentsResult { + /** The total count of results found by the search operation, or null if the count was not requested. If present, the count may be greater than the number of results in this response. This can happen if you use the $top or $skip parameters, or if the query can't return all the requested documents in a single response. */ + readonly count?: number; + /** A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not specified in the request. */ + readonly coverage?: number; + /** The facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not include any facet expressions. */ + readonly facets?: Record; + /** The answers query results for the search operation; null if the answers query parameter was not specified or set to 'none'. */ + readonly answers?: QueryAnswerResult[]; + /** Debug information that applies to the search results as a whole. */ + readonly debugInfo?: DebugInfo; + /** Continuation JSON payload returned when the query can't return all the requested results in a single response. You can use this JSON along with @odata.nextLink to formulate another POST Search request to get the next part of the search response. */ + readonly nextPageParameters?: SearchRequest; + /** The sequence of results returned by the query. */ + readonly results: SearchResult[]; + /** Continuation URL returned when the query can't return all the requested results in a single response. You can use this URL to formulate another GET or POST Search request to get the next part of the search response. Make sure to use the same verb (GET or POST) as the request that produced this response. */ + readonly nextLink?: string; + /** Reason that a partial response was returned for a semantic ranking request. */ + readonly semanticPartialResponseReason?: SemanticErrorReason; + /** Type of partial response that was returned for a semantic ranking request. */ + readonly semanticPartialResponseType?: SemanticSearchResultsType; + /** Type of query rewrite that was used to retrieve documents. */ + readonly semanticQueryRewritesResultType?: SemanticQueryRewritesResultType; +} + +export function searchDocumentsResultSerializer(item: SearchDocumentsResult): any { + return item; +} + +export function searchDocumentsResultDeserializer(item: any): SearchDocumentsResult { + return { + count: item["@odata.count"], + coverage: item["@search.coverage"], + facets: !item["@search.facets"] + ? item["@search.facets"] + : facetResultArrayRecordDeserializer(item["@search.facets"]), + answers: !item["@search.answers"] + ? 
item["@search.answers"] + : queryAnswerResultArrayDeserializer(item["@search.answers"]), + debugInfo: !item["@search.debug"] + ? item["@search.debug"] + : debugInfoDeserializer(item["@search.debug"]), + nextPageParameters: !item["@search.nextPageParameters"] + ? item["@search.nextPageParameters"] + : searchRequestDeserializer(item["@search.nextPageParameters"]), + results: searchResultArrayDeserializer(item["value"]), + nextLink: item["@odata.nextLink"], + semanticPartialResponseReason: item["@search.semanticPartialResponseReason"], + semanticPartialResponseType: item["@search.semanticPartialResponseType"], + semanticQueryRewritesResultType: item["@search.semanticQueryRewritesResultType"], + }; +} + +export function facetResultArrayRecordSerializer( + item: Record>, +): Record { + const result: Record = {}; + Object.keys(item).map((key) => { + result[key] = !item[key] ? item[key] : facetResultArraySerializer(item[key]); + }); + return result; +} + +export function facetResultArrayRecordDeserializer( + item: Record, +): Record> { + const result: Record = {}; + Object.keys(item).map((key) => { + result[key] = !item[key] ? item[key] : facetResultArrayDeserializer(item[key]); + }); + return result; +} + +export function facetResultArraySerializer(result: Array): any[] { + return result.map((item) => { + return facetResultSerializer(item); + }); +} + +export function facetResultArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return facetResultDeserializer(item); + }); +} + +/** A single bucket of a facet query result. Reports the number of documents with a field value falling within a particular range or having a particular value or interval. */ +export interface FacetResult { + /** The approximate count of documents falling within the bucket described by this facet. */ + count?: number; + /** The nested facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not contain any nested facets. */ + readonly facets?: Record; + /** The resulting total sum for the facet when a sum metric is requested. */ + readonly sum?: number; + /** Additional properties */ + additionalProperties?: Record; +} + +export function facetResultSerializer(item: FacetResult): any { + return { + ...serializeRecord(item.additionalProperties ?? {}), + count: item["count"], + }; +} + +export function facetResultDeserializer(item: any): FacetResult { + return { + additionalProperties: serializeRecord(item, ["count", "facets", "sum"]), + count: item["count"], + facets: !item["@search.facets"] + ? item["@search.facets"] + : facetResultArrayRecordDeserializer(item["@search.facets"]), + sum: item["sum"], + }; +} + +export function queryAnswerResultArraySerializer(result: Array): any[] { + return result.map((item) => { + return queryAnswerResultSerializer(item); + }); +} + +export function queryAnswerResultArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return queryAnswerResultDeserializer(item); + }); +} + +/** An answer is a text passage extracted from the contents of the most relevant documents that matched the query. Answers are extracted from the top search results. Answer candidates are scored and the top answers are selected. */ +export interface QueryAnswerResult { + /** The score value represents how relevant the answer is to the query relative to other answers returned for the query. */ + score?: number; + /** The key of the document the answer was extracted from. 
*/ + key?: string; + /** The text passage extracted from the document contents as the answer. */ + text?: string; + /** Same text passage as in the Text property with highlighted text phrases most relevant to the query. */ + highlights?: string; + /** Additional properties */ + additionalProperties?: Record; +} + +export function queryAnswerResultSerializer(item: QueryAnswerResult): any { + return { + ...serializeRecord(item.additionalProperties ?? {}), + score: item["score"], + key: item["key"], + text: item["text"], + highlights: item["highlights"], + }; +} + +export function queryAnswerResultDeserializer(item: any): QueryAnswerResult { + return { + additionalProperties: serializeRecord(item, ["score", "key", "text", "highlights"]), + score: item["score"], + key: item["key"], + text: item["text"], + highlights: item["highlights"], + }; +} + +/** Contains debugging information that can be used to further explore your search results. */ +export interface DebugInfo { + /** Contains debugging information specific to query rewrites. */ + readonly queryRewrites?: QueryRewritesDebugInfo; +} + +export function debugInfoSerializer(item: DebugInfo): any { + return item; +} + +export function debugInfoDeserializer(item: any): DebugInfo { + return { + queryRewrites: !item["queryRewrites"] + ? item["queryRewrites"] + : queryRewritesDebugInfoDeserializer(item["queryRewrites"]), + }; +} + +/** Contains debugging information specific to query rewrites. */ +export interface QueryRewritesDebugInfo { + /** List of query rewrites generated for the text query. */ + readonly text?: QueryRewritesValuesDebugInfo; + /** List of query rewrites generated for the vectorizable text queries. */ + readonly vectors?: QueryRewritesValuesDebugInfo[]; +} + +export function queryRewritesDebugInfoDeserializer(item: any): QueryRewritesDebugInfo { + return { + text: !item["text"] ? item["text"] : queryRewritesValuesDebugInfoDeserializer(item["text"]), + vectors: !item["vectors"] + ? item["vectors"] + : queryRewritesValuesDebugInfoArrayDeserializer(item["vectors"]), + }; +} + +/** Contains debugging information specific to query rewrites. */ +export interface QueryRewritesValuesDebugInfo { + /** The input text to the generative query rewriting model. There may be cases where the user query and the input to the generative model are not identical. */ + readonly inputQuery?: string; + /** List of query rewrites. */ + readonly rewrites?: string[]; +} + +export function queryRewritesValuesDebugInfoDeserializer(item: any): QueryRewritesValuesDebugInfo { + return { + inputQuery: item["inputQuery"], + rewrites: !item["rewrites"] + ? item["rewrites"] + : item["rewrites"].map((p: any) => { + return p; + }), + }; +} + +export function queryRewritesValuesDebugInfoArrayDeserializer( + result: Array, +): any[] { + return result.map((item) => { + return queryRewritesValuesDebugInfoDeserializer(item); + }); +} + +/** Parameters for filtering, sorting, faceting, paging, and other search query behaviors. */ +export interface SearchRequest { + /** A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. */ + includeTotalCount?: boolean; + /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. */ + facets?: string[]; + /** The OData $filter expression to apply to the search query. 
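+   *
+   * Illustrative sketch (editor's note, not generated code): a minimal request combining
+   * a filter with paging; the field name in the filter string is hypothetical:
+   *
+   * ```typescript
+   * const request: SearchRequest = {
+   *   searchText: "wifi",
+   *   filter: "Rating ge 4",
+   *   top: 10,
+   *   skip: 0,
+   * };
+   * ```
+   *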
*/ + filter?: string; + /** The comma-separated list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */ + highlightFields?: string; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */ + minimumCoverage?: number; + /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string; + /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */ + queryType?: QueryType; + /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. */ + scoringStatistics?: ScoringStatistics; + /** A value to be used to create a sticky session, which can help getting more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */ + sessionId?: string; + /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). */ + scoringParameters?: string[]; + /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ + scoringProfile?: string; + /** Enables a debugging tool that can be used to further explore your reranked results. */ + debug?: QueryDebugMode; + /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */ + searchText?: string; + /** The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ + searchFields?: string; + /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. 
*/ + searchMode?: SearchMode; + /** A value that specifies the language of the search query. */ + queryLanguage?: QueryLanguage; + /** A value that specified the type of the speller to use to spell-correct individual search query terms. */ + querySpeller?: QuerySpellerType; + /** The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. */ + select?: string; + /** The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead. */ + skip?: number; + /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */ + top?: number; + /** The name of a semantic configuration that will be used when processing documents for queries of type semantic. */ + semanticConfigurationName?: string; + /** Allows the user to choose whether a semantic call should fail completely (default / current behavior), or to return partial results. */ + semanticErrorHandling?: SemanticErrorMode; + /** Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. */ + semanticMaxWaitInMilliseconds?: number; + /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */ + semanticQuery?: string; + /** A value that specifies whether answers should be returned as part of the search response. */ + answers?: QueryAnswerType; + /** A value that specifies whether captions should be returned as part of the search response. */ + captions?: QueryCaptionType; + /** A value that specifies whether query rewrites should be generated to augment the search query. */ + queryRewrites?: QueryRewritesType; + /** The comma-separated list of field names used for semantic ranking. */ + semanticFields?: string; + /** The query parameters for vector and hybrid search queries. */ + vectorQueries?: VectorQueryUnion[]; + /** Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter' for new indexes. */ + vectorFilterMode?: VectorFilterMode; + /** The query parameters to configure hybrid search behaviors. */ + hybridSearch?: HybridSearch; +} + +export function searchRequestDeserializer(item: any): SearchRequest { + return { + includeTotalCount: item["count"], + facets: !item["facets"] + ? item["facets"] + : item["facets"].map((p: any) => { + return p; + }), + filter: item["filter"], + highlightFields: item["highlight"], + highlightPostTag: item["highlightPostTag"], + highlightPreTag: item["highlightPreTag"], + minimumCoverage: item["minimumCoverage"], + orderBy: item["orderby"], + queryType: item["queryType"], + scoringStatistics: item["scoringStatistics"], + sessionId: item["sessionId"], + scoringParameters: !item["scoringParameters"] + ? 
item["scoringParameters"] + : item["scoringParameters"].map((p: any) => { + return p; + }), + scoringProfile: item["scoringProfile"], + debug: item["debug"], + searchText: item["search"], + searchFields: item["searchFields"], + searchMode: item["searchMode"], + queryLanguage: item["queryLanguage"], + querySpeller: item["speller"], + select: item["select"], + skip: item["skip"], + top: item["top"], + semanticConfigurationName: item["semanticConfiguration"], + semanticErrorHandling: item["semanticErrorHandling"], + semanticMaxWaitInMilliseconds: item["semanticMaxWaitInMilliseconds"], + semanticQuery: item["semanticQuery"], + answers: item["answers"], + captions: item["captions"], + queryRewrites: item["queryRewrites"], + semanticFields: item["semanticFields"], + vectorQueries: !item["vectorQueries"] + ? item["vectorQueries"] + : vectorQueryUnionArrayDeserializer(item["vectorQueries"]), + vectorFilterMode: item["vectorFilterMode"], + hybridSearch: !item["hybridSearch"] + ? item["hybridSearch"] + : hybridSearchDeserializer(item["hybridSearch"]), + }; +} + +/** Specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax and 'semantic' if query syntax is not needed. */ +export enum KnownQueryType { + /** Uses the simple query syntax for searches. Search text is interpreted using a simple query language that allows for symbols such as +, * and "". Queries are evaluated across all searchable fields by default, unless the searchFields parameter is specified. */ + Simple = "simple", + /** Uses the full Lucene query syntax for searches. Search text is interpreted using the Lucene query language which allows field-specific and weighted searches, as well as other advanced features. */ + Full = "full", + /** Best suited for queries expressed in natural language as opposed to keywords. Improves precision of search results by re-ranking the top search results using a ranking model trained on the Web corpus. */ + Semantic = "semantic", +} + +/** + * Specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax and 'semantic' if query syntax is not needed. \ + * {@link KnownQueryType} can be used interchangeably with QueryType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **simple**: Uses the simple query syntax for searches. Search text is interpreted using a simple query language that allows for symbols such as +, * and "". Queries are evaluated across all searchable fields by default, unless the searchFields parameter is specified. \ + * **full**: Uses the full Lucene query syntax for searches. Search text is interpreted using the Lucene query language which allows field-specific and weighted searches, as well as other advanced features. \ + * **semantic**: Best suited for queries expressed in natural language as opposed to keywords. Improves precision of search results by re-ranking the top search results using a ranking model trained on the Web corpus. + */ +export type QueryType = string; + +/** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. 
*/ +export enum KnownScoringStatistics { + /** The scoring statistics will be calculated locally for lower latency. */ + Local = "local", + /** The scoring statistics will be calculated globally for more consistent scoring. */ + Global = "global", +} + +/** + * A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. \ + * {@link KnownScoringStatistics} can be used interchangeably with ScoringStatistics, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **local**: The scoring statistics will be calculated locally for lower latency. \ + * **global**: The scoring statistics will be calculated globally for more consistent scoring. + */ +export type ScoringStatistics = string; + +/** Enables a debugging tool that can be used to further explore your search results. You can enable multiple debug modes simultaneously by separating them with a | character, for example: semantic|queryRewrites. */ +export enum KnownQueryDebugMode { + /** No query debugging information will be returned. */ + Disabled = "disabled", + /** Allows the user to further explore their reranked results. */ + Semantic = "semantic", + /** Allows the user to further explore their hybrid and vector query results. */ + Vector = "vector", + /** Allows the user to explore the list of query rewrites generated for their search request. */ + QueryRewrites = "queryRewrites", + /** Allows the user to retrieve scoring information regarding vectors matched within a collection of complex types. */ + InnerHits = "innerHits", + /** Turn on all debug options. */ + All = "all", +} + +/** + * Enables a debugging tool that can be used to further explore your search results. You can enable multiple debug modes simultaneously by separating them with a | character, for example: semantic|queryRewrites. \ + * {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **disabled**: No query debugging information will be returned. \ + * **semantic**: Allows the user to further explore their reranked results. \ + * **vector**: Allows the user to further explore their hybrid and vector query results. \ + * **queryRewrites**: Allows the user to explore the list of query rewrites generated for their search request. \ + * **innerHits**: Allows the user to retrieve scoring information regarding vectors matched within a collection of complex types. \ + * **all**: Turn on all debug options. + */ +export type QueryDebugMode = string; + +/** Specifies whether any or all of the search terms must be matched in order to count the document as a match. */ +export enum KnownSearchMode { + /** Any of the search terms must be matched in order to count the document as a match. */ + Any = "any", + /** All of the search terms must be matched in order to count the document as a match. */ + All = "all", +} + +/** + * Specifies whether any or all of the search terms must be matched in order to count the document as a match. \ + * {@link KnownSearchMode} can be used interchangeably with SearchMode, + * this enum contains the known values that the service supports. 
+
+/** The language of the query. */
+export enum KnownQueryLanguage {
+  /** Query language not specified. */
+  None = "none",
+  /** Query language value for English (United States). */
+  EnUs = "en-us",
+  /** Query language value for English (Great Britain). */
+  EnGb = "en-gb",
+  /** Query language value for English (India). */
+  EnIn = "en-in",
+  /** Query language value for English (Canada). */
+  EnCa = "en-ca",
+  /** Query language value for English (Australia). */
+  EnAu = "en-au",
+  /** Query language value for French (France). */
+  FrFr = "fr-fr",
+  /** Query language value for French (Canada). */
+  FrCa = "fr-ca",
+  /** Query language value for German (Germany). */
+  DeDe = "de-de",
+  /** Query language value for Spanish (Spain). */
+  EsEs = "es-es",
+  /** Query language value for Spanish (Mexico). */
+  EsMx = "es-mx",
+  /** Query language value for Chinese (China). */
+  ZhCn = "zh-cn",
+  /** Query language value for Chinese (Taiwan). */
+  ZhTw = "zh-tw",
+  /** Query language value for Portuguese (Brazil). */
+  PtBr = "pt-br",
+  /** Query language value for Portuguese (Portugal). */
+  PtPt = "pt-pt",
+  /** Query language value for Italian (Italy). */
+  ItIt = "it-it",
+  /** Query language value for Japanese (Japan). */
+  JaJp = "ja-jp",
+  /** Query language value for Korean (Korea). */
+  KoKr = "ko-kr",
+  /** Query language value for Russian (Russia). */
+  RuRu = "ru-ru",
+  /** Query language value for Czech (Czech Republic). */
+  CsCz = "cs-cz",
+  /** Query language value for Dutch (Belgium). */
+  NlBe = "nl-be",
+  /** Query language value for Dutch (Netherlands). */
+  NlNl = "nl-nl",
+  /** Query language value for Hungarian (Hungary). */
+  HuHu = "hu-hu",
+  /** Query language value for Polish (Poland). */
+  PlPl = "pl-pl",
+  /** Query language value for Swedish (Sweden). */
+  SvSe = "sv-se",
+  /** Query language value for Turkish (Turkey). */
+  TrTr = "tr-tr",
+  /** Query language value for Hindi (India). */
+  HiIn = "hi-in",
+  /** Query language value for Arabic (Saudi Arabia). */
+  ArSa = "ar-sa",
+  /** Query language value for Arabic (Egypt). */
+  ArEg = "ar-eg",
+  /** Query language value for Arabic (Morocco). */
+  ArMa = "ar-ma",
+  /** Query language value for Arabic (Kuwait). */
+  ArKw = "ar-kw",
+  /** Query language value for Arabic (Jordan). */
+  ArJo = "ar-jo",
+  /** Query language value for Danish (Denmark). */
+  DaDk = "da-dk",
+  /** Query language value for Norwegian (Norway). */
+  NoNo = "no-no",
+  /** Query language value for Bulgarian (Bulgaria). */
+  BgBg = "bg-bg",
+  /** Query language value for Croatian (Croatia). */
+  HrHr = "hr-hr",
+  /** Query language value for Croatian (Bosnia and Herzegovina). */
+  HrBa = "hr-ba",
+  /** Query language value for Malay (Malaysia). */
+  MsMy = "ms-my",
+  /** Query language value for Malay (Brunei Darussalam). */
+  MsBn = "ms-bn",
+  /** Query language value for Slovenian (Slovenia). */
+  SlSl = "sl-sl",
+  /** Query language value for Tamil (India). */
+  TaIn = "ta-in",
+  /** Query language value for Vietnamese (Viet Nam). */
+  ViVn = "vi-vn",
+  /** Query language value for Greek (Greece). */
+  ElGr = "el-gr",
+  /** Query language value for Romanian (Romania). */
+  RoRo = "ro-ro",
+  /** Query language value for Icelandic (Iceland). */
+  IsIs = "is-is",
+  /** Query language value for Indonesian (Indonesia). */
+  IdId = "id-id",
+  /** Query language value for Thai (Thailand). */
+  ThTh = "th-th",
+  /** Query language value for Lithuanian (Lithuania). */
+  LtLt = "lt-lt",
+  /** Query language value for Ukrainian (Ukraine). */
+  UkUa = "uk-ua",
+  /** Query language value for Latvian (Latvia). */
+  LvLv = "lv-lv",
+  /** Query language value for Estonian (Estonia). */
+  EtEe = "et-ee",
+  /** Query language value for Catalan. */
+  CaEs = "ca-es",
+  /** Query language value for Finnish (Finland). */
+  FiFi = "fi-fi",
+  /** Query language value for Serbian (Bosnia and Herzegovina). */
+  SrBa = "sr-ba",
+  /** Query language value for Serbian (Montenegro). */
+  SrMe = "sr-me",
+  /** Query language value for Serbian (Serbia). */
+  SrRs = "sr-rs",
+  /** Query language value for Slovak (Slovakia). */
+  SkSk = "sk-sk",
+  /** Query language value for Norwegian (Norway). */
+  NbNo = "nb-no",
+  /** Query language value for Armenian (Armenia). */
+  HyAm = "hy-am",
+  /** Query language value for Bengali (India). */
+  BnIn = "bn-in",
+  /** Query language value for Basque. */
+  EuEs = "eu-es",
+  /** Query language value for Galician. */
+  GlEs = "gl-es",
+  /** Query language value for Gujarati (India). */
+  GuIn = "gu-in",
+  /** Query language value for Hebrew (Israel). */
+  HeIl = "he-il",
+  /** Query language value for Irish (Ireland). */
+  GaIe = "ga-ie",
+  /** Query language value for Kannada (India). */
+  KnIn = "kn-in",
+  /** Query language value for Malayalam (India). */
+  MlIn = "ml-in",
+  /** Query language value for Marathi (India). */
+  MrIn = "mr-in",
+  /** Query language value for Persian (U.A.E.). */
+  FaAe = "fa-ae",
+  /** Query language value for Punjabi (India). */
+  PaIn = "pa-in",
+  /** Query language value for Telugu (India). */
+  TeIn = "te-in",
+  /** Query language value for Urdu (Pakistan). */
+  UrPk = "ur-pk",
+}
+
+/**
+ * The language of the query. \
+ * {@link KnownQueryLanguage} can be used interchangeably with QueryLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Query language not specified. \
+ * **en-us**: Query language value for English (United States). \
+ * **en-gb**: Query language value for English (Great Britain). \
+ * **en-in**: Query language value for English (India). \
+ * **en-ca**: Query language value for English (Canada). \
+ * **en-au**: Query language value for English (Australia). \
+ * **fr-fr**: Query language value for French (France). \
+ * **fr-ca**: Query language value for French (Canada). \
+ * **de-de**: Query language value for German (Germany). \
+ * **es-es**: Query language value for Spanish (Spain). \
+ * **es-mx**: Query language value for Spanish (Mexico). \
+ * **zh-cn**: Query language value for Chinese (China). \
+ * **zh-tw**: Query language value for Chinese (Taiwan). \
+ * **pt-br**: Query language value for Portuguese (Brazil). \
+ * **pt-pt**: Query language value for Portuguese (Portugal). \
+ * **it-it**: Query language value for Italian (Italy). \
+ * **ja-jp**: Query language value for Japanese (Japan). \
+ * **ko-kr**: Query language value for Korean (Korea). \
+ * **ru-ru**: Query language value for Russian (Russia). \
+ * **cs-cz**: Query language value for Czech (Czech Republic). \
+ * **nl-be**: Query language value for Dutch (Belgium). \
+ * **nl-nl**: Query language value for Dutch (Netherlands). \
+ * **hu-hu**: Query language value for Hungarian (Hungary). \
+ * **pl-pl**: Query language value for Polish (Poland). \
+ * **sv-se**: Query language value for Swedish (Sweden). \
+ * **tr-tr**: Query language value for Turkish (Turkey). \
+ * **hi-in**: Query language value for Hindi (India). \
+ * **ar-sa**: Query language value for Arabic (Saudi Arabia). \
+ * **ar-eg**: Query language value for Arabic (Egypt). \
+ * **ar-ma**: Query language value for Arabic (Morocco). \
+ * **ar-kw**: Query language value for Arabic (Kuwait). \
+ * **ar-jo**: Query language value for Arabic (Jordan). \
+ * **da-dk**: Query language value for Danish (Denmark). \
+ * **no-no**: Query language value for Norwegian (Norway). \
+ * **bg-bg**: Query language value for Bulgarian (Bulgaria). \
+ * **hr-hr**: Query language value for Croatian (Croatia). \
+ * **hr-ba**: Query language value for Croatian (Bosnia and Herzegovina). \
+ * **ms-my**: Query language value for Malay (Malaysia). \
+ * **ms-bn**: Query language value for Malay (Brunei Darussalam). \
+ * **sl-sl**: Query language value for Slovenian (Slovenia). \
+ * **ta-in**: Query language value for Tamil (India). \
+ * **vi-vn**: Query language value for Vietnamese (Viet Nam). \
+ * **el-gr**: Query language value for Greek (Greece). \
+ * **ro-ro**: Query language value for Romanian (Romania). \
+ * **is-is**: Query language value for Icelandic (Iceland). \
+ * **id-id**: Query language value for Indonesian (Indonesia). \
+ * **th-th**: Query language value for Thai (Thailand). \
+ * **lt-lt**: Query language value for Lithuanian (Lithuania). \
+ * **uk-ua**: Query language value for Ukrainian (Ukraine). \
+ * **lv-lv**: Query language value for Latvian (Latvia). \
+ * **et-ee**: Query language value for Estonian (Estonia). \
+ * **ca-es**: Query language value for Catalan. \
+ * **fi-fi**: Query language value for Finnish (Finland). \
+ * **sr-ba**: Query language value for Serbian (Bosnia and Herzegovina). \
+ * **sr-me**: Query language value for Serbian (Montenegro). \
+ * **sr-rs**: Query language value for Serbian (Serbia). \
+ * **sk-sk**: Query language value for Slovak (Slovakia). \
+ * **nb-no**: Query language value for Norwegian (Norway). \
+ * **hy-am**: Query language value for Armenian (Armenia). \
+ * **bn-in**: Query language value for Bengali (India). \
+ * **eu-es**: Query language value for Basque. \
+ * **gl-es**: Query language value for Galician. \
+ * **gu-in**: Query language value for Gujarati (India). \
+ * **he-il**: Query language value for Hebrew (Israel). \
+ * **ga-ie**: Query language value for Irish (Ireland). \
+ * **kn-in**: Query language value for Kannada (India). \
+ * **ml-in**: Query language value for Malayalam (India). \
+ * **mr-in**: Query language value for Marathi (India). \
+ * **fa-ae**: Query language value for Persian (U.A.E.). \
+ * **pa-in**: Query language value for Punjabi (India). \
+ * **te-in**: Query language value for Telugu (India). \
+ * **ur-pk**: Query language value for Urdu (Pakistan).
+ */
+export type QueryLanguage = string;
+
+/** Improve search recall by spell-correcting individual search query terms. */
+export enum KnownQuerySpellerType {
+  /** Speller not enabled. */
+  None = "none",
+  /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */
+  Lexicon = "lexicon",
+}
+
+/**
+ * Improve search recall by spell-correcting individual search query terms. \
+ * {@link KnownQuerySpellerType} can be used interchangeably with QuerySpellerType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Speller not enabled. \
+ * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
+ */
+export type QuerySpellerType = string;
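+
+// A hedged usage sketch (illustrative only): enabling lexicon-based spelling
+// correction, which is scoped to the language given in queryLanguage, on the
+// SearchRequest model above.
+const correctedRequest: SearchRequest = {
+  searchText: "hotle near the beach",
+  queryLanguage: KnownQueryLanguage.EnUs,
+  querySpeller: KnownQuerySpellerType.Lexicon,
+};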
+
+/** Allows the user to choose whether a semantic call should fail completely, or to return partial results. */
+export enum KnownSemanticErrorMode {
+  /** If the semantic processing fails, partial results still return. The definition of partial results depends on what semantic step failed and what was the reason for failure. */
+  Partial = "partial",
+  /** If there is an exception during the semantic processing step, the query will fail and return the appropriate HTTP code depending on the error. */
+  Fail = "fail",
+}
+
+/**
+ * Allows the user to choose whether a semantic call should fail completely, or to return partial results. \
+ * {@link KnownSemanticErrorMode} can be used interchangeably with SemanticErrorMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **partial**: If the semantic processing fails, partial results still return. The definition of partial results depends on what semantic step failed and what was the reason for failure. \
+ * **fail**: If there is an exception during the semantic processing step, the query will fail and return the appropriate HTTP code depending on the error.
+ */
+export type SemanticErrorMode = string;
+
+/** This parameter is only valid if the query type is `semantic`. If set, the query returns answers extracted from key passages in the highest ranked documents. The number of answers returned can be configured by appending the pipe character `|` followed by the `count-` option after the answers parameter value, such as `extractive|count-3`. Default count is 1. The confidence threshold can be configured by appending the pipe character `|` followed by the `threshold-` option after the answers parameter value, such as `extractive|threshold-0.9`. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. */
+export enum KnownQueryAnswerType {
+  /** Do not return answers for the query. */
+  None = "none",
+  /** Extracts answer candidates from the contents of the documents returned in response to a query expressed as a question in natural language. */
+  Extractive = "extractive",
+}
+
+/**
+ * This parameter is only valid if the query type is `semantic`. If set, the query returns answers extracted from key passages in the highest ranked documents. The number of answers returned can be configured by appending the pipe character `|` followed by the `count-` option after the answers parameter value, such as `extractive|count-3`. Default count is 1. The confidence threshold can be configured by appending the pipe character `|` followed by the `threshold-` option after the answers parameter value, such as `extractive|threshold-0.9`. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. \
+ * {@link KnownQueryAnswerType} can be used interchangeably with QueryAnswerType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Do not return answers for the query. \
+ * **extractive**: Extracts answer candidates from the contents of the documents returned in response to a query expressed as a question in natural language.
+ */
+export type QueryAnswerType = string;
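+
+// A hedged usage sketch (illustrative only): requesting up to three extractive
+// answers on a semantic query via the SearchRequest model above. The option
+// string follows the `extractive|count-3` syntax documented above.
+const answersRequest: SearchRequest = {
+  searchText: "what is the cancellation policy",
+  queryType: KnownQueryType.Semantic,
+  semanticConfigurationName: "default",
+  answers: "extractive|count-3",
+};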
+
+/** This parameter is only valid if the query type is `semantic`. If set, the query returns captions extracted from key passages in the highest ranked documents. When Captions is set to `extractive`, highlighting is enabled by default, and can be configured by appending the pipe character `|` followed by the `highlight-` option, such as `extractive|highlight-true`. Defaults to `None`. The maximum character length of captions can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. */
+export enum KnownQueryCaptionType {
+  /** Do not return captions for the query. */
+  None = "none",
+  /** Extracts captions from the matching documents that contain passages relevant to the search query. */
+  Extractive = "extractive",
+}
+
+/**
+ * This parameter is only valid if the query type is `semantic`. If set, the query returns captions extracted from key passages in the highest ranked documents. When Captions is set to `extractive`, highlighting is enabled by default, and can be configured by appending the pipe character `|` followed by the `highlight-` option, such as `extractive|highlight-true`. Defaults to `None`. The maximum character length of captions can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. \
+ * {@link KnownQueryCaptionType} can be used interchangeably with QueryCaptionType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Do not return captions for the query. \
+ * **extractive**: Extracts captions from the matching documents that contain passages relevant to the search query.
+ */
+export type QueryCaptionType = string;
+
+/** This parameter is only valid if the query type is `semantic`. When QueryRewrites is set to `generative`, the query terms are sent to a generative model which will produce 10 (default) rewrites to help increase the recall of the request. The requested count can be configured by appending the pipe character `|` followed by the `count-` option, such as `generative|count-3`. Defaults to `None`. */
+export enum KnownQueryRewritesType {
+  /** Do not generate additional query rewrites for this query. */
+  None = "none",
+  /** Generate alternative query terms to increase the recall of a search request. */
+  Generative = "generative",
+}
+
+/**
+ * This parameter is only valid if the query type is `semantic`. When QueryRewrites is set to `generative`, the query terms are sent to a generative model which will produce 10 (default) rewrites to help increase the recall of the request. The requested count can be configured by appending the pipe character `|` followed by the `count-` option, such as `generative|count-3`. Defaults to `None`. \
+ * {@link KnownQueryRewritesType} can be used interchangeably with QueryRewritesType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Do not generate additional query rewrites for this query. \
+ * **generative**: Generate alternative query terms to increase the recall of a search request.
+ */
+export type QueryRewritesType = string;
+
+export function vectorQueryUnionArraySerializer(result: Array<VectorQueryUnion>): any[] {
+  return result.map((item) => {
+    return vectorQueryUnionSerializer(item);
+  });
+}
+
+export function vectorQueryUnionArrayDeserializer(result: Array<VectorQueryUnion>): any[] {
+  return result.map((item) => {
+    return vectorQueryUnionDeserializer(item);
+  });
+}
+
+/** The query parameters for vector and hybrid search queries. */
+export interface VectorQuery {
+  /** Number of nearest neighbors to return as top hits. */
+  kNearestNeighbors?: number;
+  /** Vector Fields of type Collection(Edm.Single) to be included in the vector search. */
+  fields?: string;
+  /** When true, triggers an exhaustive k-nearest neighbor search across all vectors within the vector index. Useful for scenarios where exact matches are critical, such as determining ground truth values. */
+  exhaustive?: boolean;
+  /** Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter configured in the index definition. It can be set only when 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method is used on the underlying vector field. */
+  oversampling?: number;
+  /** Relative weight of the vector query when compared to other vector queries and/or the text query within the same search request. This value is used when combining the results of multiple ranking lists produced by the different vector queries and/or the results retrieved through the text query. The higher the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. */
+  weight?: number;
+  /** The threshold used for vector queries. Note this can only be set if all 'fields' use the same similarity metric. */
+  threshold?: VectorThresholdUnion;
+  /** The OData filter expression to apply to this specific vector query. If no filter expression is defined at the vector level, the expression defined in the top level filter parameter is used instead. */
+  filterOverride?: string;
+  /** Limits the number of vectors per document that are considered by this vector query. */
+  perDocumentVectorLimit?: number;
+  /** Type of query. */
+  /** The discriminator possible values: vector, text, imageUrl, imageBinary */
+  kind: VectorQueryKind;
+}
+
+export function vectorQuerySerializer(item: VectorQuery): any {
+  return {
+    k: item["kNearestNeighbors"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionSerializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+  };
+}
+
+export function vectorQueryDeserializer(item: any): VectorQuery {
+  return {
+    kNearestNeighbors: item["k"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionDeserializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+  };
+}
item["threshold"] + : vectorThresholdUnionDeserializer(item["threshold"]), + filterOverride: item["filterOverride"], + perDocumentVectorLimit: item["perDocumentVectorLimit"], + kind: item["kind"], + }; +} + +/** Alias for VectorQueryUnion */ +export type VectorQueryUnion = + | VectorizedQuery + | VectorizableTextQuery + | VectorizableImageUrlQuery + | VectorizableImageBinaryQuery + | VectorQuery; + +export function vectorQueryUnionSerializer(item: VectorQueryUnion): any { + switch (item.kind) { + case "vector": + return vectorizedQuerySerializer(item as VectorizedQuery); + + case "text": + return vectorizableTextQuerySerializer(item as VectorizableTextQuery); + + case "imageUrl": + return vectorizableImageUrlQuerySerializer(item as VectorizableImageUrlQuery); + + case "imageBinary": + return vectorizableImageBinaryQuerySerializer(item as VectorizableImageBinaryQuery); + + default: + return vectorQuerySerializer(item); + } +} + +export function vectorQueryUnionDeserializer(item: any): VectorQueryUnion { + switch (item.kind) { + case "vector": + return vectorizedQueryDeserializer(item as VectorizedQuery); + + case "text": + return vectorizableTextQueryDeserializer(item as VectorizableTextQuery); + + case "imageUrl": + return vectorizableImageUrlQueryDeserializer(item as VectorizableImageUrlQuery); + + case "imageBinary": + return vectorizableImageBinaryQueryDeserializer(item as VectorizableImageBinaryQuery); + + default: + return vectorQueryDeserializer(item); + } +} + +/** The threshold used for vector queries. */ +export interface VectorThreshold { + /** Type of threshold. */ + /** The discriminator possible values: vectorSimilarity, searchScore */ + kind: VectorThresholdKind; +} + +export function vectorThresholdSerializer(item: VectorThreshold): any { + return { kind: item["kind"] }; +} + +export function vectorThresholdDeserializer(item: any): VectorThreshold { + return { + kind: item["kind"], + }; +} + +/** Alias for VectorThresholdUnion */ +export type VectorThresholdUnion = + | VectorSimilarityThreshold + | SearchScoreThreshold + | VectorThreshold; + +export function vectorThresholdUnionSerializer(item: VectorThresholdUnion): any { + switch (item.kind) { + case "vectorSimilarity": + return vectorSimilarityThresholdSerializer(item as VectorSimilarityThreshold); + + case "searchScore": + return searchScoreThresholdSerializer(item as SearchScoreThreshold); + + default: + return vectorThresholdSerializer(item); + } +} + +export function vectorThresholdUnionDeserializer(item: any): VectorThresholdUnion { + switch (item.kind) { + case "vectorSimilarity": + return vectorSimilarityThresholdDeserializer(item as VectorSimilarityThreshold); + + case "searchScore": + return searchScoreThresholdDeserializer(item as SearchScoreThreshold); + + default: + return vectorThresholdDeserializer(item); + } +} + +/** The kind of vector query being performed. */ +export enum KnownVectorThresholdKind { + /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */ + VectorSimilarity = "vectorSimilarity", + /** The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. 
+
+/**
+ * The kind of threshold used to filter vector queries. \
+ * {@link KnownVectorThresholdKind} can be used interchangeably with VectorThresholdKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **vectorSimilarity**: The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. \
+ * **searchScore**: The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score.
+ */
+export type VectorThresholdKind = string;
+
+/** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
+export interface VectorSimilarityThreshold extends VectorThreshold {
+  /** The threshold will filter based on the similarity metric value. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
+  value: number;
+  /** The kind of threshold used to filter vector queries */
+  kind: "vectorSimilarity";
+}
+
+export function vectorSimilarityThresholdSerializer(item: VectorSimilarityThreshold): any {
+  return { kind: item["kind"], value: item["value"] };
+}
+
+export function vectorSimilarityThresholdDeserializer(item: any): VectorSimilarityThreshold {
+  return {
+    kind: item["kind"],
+    value: item["value"],
+  };
+}
+
+/** The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */
+export interface SearchScoreThreshold extends VectorThreshold {
+  /** The threshold will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */
+  value: number;
+  /** The kind of threshold used to filter vector queries */
+  kind: "searchScore";
+}
+
+export function searchScoreThresholdSerializer(item: SearchScoreThreshold): any {
+  return { kind: item["kind"], value: item["value"] };
+}
+
+export function searchScoreThresholdDeserializer(item: any): SearchScoreThreshold {
+  return {
+    kind: item["kind"],
+    value: item["value"],
+  };
+}
+
+/** The kind of vector query being performed. */
+export enum KnownVectorQueryKind {
+  /** Vector query where a raw vector value is provided. */
+  Vector = "vector",
+  /** Vector query where a text value that needs to be vectorized is provided. */
+  Text = "text",
+  /** Vector query where a URL that represents an image value that needs to be vectorized is provided. */
+  ImageUrl = "imageUrl",
+  /** Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided. */
+  ImageBinary = "imageBinary",
+}
+
+/**
+ * The kind of vector query being performed. \
+ * {@link KnownVectorQueryKind} can be used interchangeably with VectorQueryKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **vector**: Vector query where a raw vector value is provided. \
+ * **text**: Vector query where a text value that needs to be vectorized is provided. \
+ * **imageUrl**: Vector query where a URL that represents an image value that needs to be vectorized is provided. \
+ * **imageBinary**: Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided.
+ */
+export type VectorQueryKind = string;
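+
+// A hedged usage sketch (illustrative only): a text vector query over an assumed
+// "contentVector" index field, reusing the score threshold sketched above.
+const textVectorQuery: VectorizableTextQuery = {
+  kind: "text",
+  text: "walkable hotels near live music",
+  fields: "contentVector",
+  kNearestNeighbors: 10,
+  threshold: scoreThreshold,
+};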
+
+/** The query parameters to use for vector search when a raw vector value is provided. */
+export interface VectorizedQuery extends VectorQuery {
+  /** The vector representation of a search query. */
+  vector: number[];
+  /** The kind of vector query being performed. */
+  kind: "vector";
+}
+
+export function vectorizedQuerySerializer(item: VectorizedQuery): any {
+  return {
+    k: item["kNearestNeighbors"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionSerializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    vector: item["vector"].map((p: any) => {
+      return p;
+    }),
+  };
+}
+
+export function vectorizedQueryDeserializer(item: any): VectorizedQuery {
+  return {
+    kNearestNeighbors: item["k"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionDeserializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    vector: item["vector"].map((p: any) => {
+      return p;
+    }),
+  };
+}
+
+/** The query parameters to use for vector search when a text value that needs to be vectorized is provided. */
+export interface VectorizableTextQuery extends VectorQuery {
+  /** The text to be vectorized to perform a vector search query. */
+  text: string;
+  /** Can be configured to let a generative model rewrite the query before sending it to be vectorized. */
+  queryRewrites?: QueryRewritesType;
+  /** The kind of vector query being performed. */
+  kind: "text";
+}
+
+export function vectorizableTextQuerySerializer(item: VectorizableTextQuery): any {
+  return {
+    k: item["kNearestNeighbors"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionSerializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    text: item["text"],
+    queryRewrites: item["queryRewrites"],
+  };
+}
+
+export function vectorizableTextQueryDeserializer(item: any): VectorizableTextQuery {
+  return {
+    kNearestNeighbors: item["k"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionDeserializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    text: item["text"],
+    queryRewrites: item["queryRewrites"],
+  };
+}
+
+/** The query parameters to use for vector search when a URL that represents an image value that needs to be vectorized is provided. */
+export interface VectorizableImageUrlQuery extends VectorQuery {
+  /** The URL of an image to be vectorized to perform a vector search query. */
+  url?: string;
+  /** The kind of vector query being performed. */
+  kind: "imageUrl";
+}
+
+export function vectorizableImageUrlQuerySerializer(item: VectorizableImageUrlQuery): any {
+  return {
+    k: item["kNearestNeighbors"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionSerializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    url: item["url"],
+  };
+}
+
+export function vectorizableImageUrlQueryDeserializer(item: any): VectorizableImageUrlQuery {
+  return {
+    kNearestNeighbors: item["k"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionDeserializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    url: item["url"],
+  };
+}
+
+/** The query parameters to use for vector search when a base 64 encoded binary of an image that needs to be vectorized is provided. */
+export interface VectorizableImageBinaryQuery extends VectorQuery {
+  /** The base 64 encoded binary of an image to be vectorized to perform a vector search query. */
+  base64Image?: string;
+  /** The kind of vector query being performed. */
+  kind: "imageBinary";
+}
+
+export function vectorizableImageBinaryQuerySerializer(item: VectorizableImageBinaryQuery): any {
+  return {
+    k: item["kNearestNeighbors"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionSerializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    base64Image: item["base64Image"],
+  };
+}
+
+export function vectorizableImageBinaryQueryDeserializer(item: any): VectorizableImageBinaryQuery {
+  return {
+    kNearestNeighbors: item["k"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionDeserializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    base64Image: item["base64Image"],
+  };
+}
+
+/** Determines whether or not filters are applied before or after the vector search is performed. */
+export enum KnownVectorFilterMode {
+  /** The filter will be applied after the candidate set of vector results is returned. Depending on the filter selectivity, this can result in fewer results than requested by the parameter 'k'. */
+  PostFilter = "postFilter",
+  /** The filter will be applied before the search query. */
+  PreFilter = "preFilter",
+  /** The filter will be applied after the global top-k candidate set of vector results is returned. */
+  StrictPostFilter = "strictPostFilter",
+}
+
+/**
+ * Determines whether or not filters are applied before or after the vector search is performed. \
+ * {@link KnownVectorFilterMode} can be used interchangeably with VectorFilterMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **postFilter**: The filter will be applied after the candidate set of vector results is returned. Depending on the filter selectivity, this can result in fewer results than requested by the parameter 'k'. \
+ * **preFilter**: The filter will be applied before the search query. \
+ * **strictPostFilter**: The filter will be applied after the global top-k candidate set of vector results is returned.
+ */
+export type VectorFilterMode = string;
+
+/** The query parameters to configure hybrid search behaviors. */
+export interface HybridSearch {
+  /** Determines the maximum number of documents to be retrieved by the text query portion of a hybrid search request. Those documents will be combined with the documents matching the vector queries to produce a single final list of results. Choosing a larger maxTextRecallSize value will allow retrieving and paging through more documents (using the top and skip parameters), at the cost of higher resource utilization and higher latency. The value needs to be between 1 and 10,000. Default is 1000. */
+  maxTextRecallSize?: number;
+  /** Determines whether the count and facets should include all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. */
+  countAndFacetMode?: HybridCountAndFacetMode;
+}
+
+export function hybridSearchSerializer(item: HybridSearch): any {
+  return {
+    maxTextRecallSize: item["maxTextRecallSize"],
+    countAndFacetMode: item["countAndFacetMode"],
+  };
+}
+
+export function hybridSearchDeserializer(item: any): HybridSearch {
+  return {
+    maxTextRecallSize: item["maxTextRecallSize"],
+    countAndFacetMode: item["countAndFacetMode"],
+  };
+}
+
+/** Determines whether the count and facets should include all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. The default value is 'countAllResults'. */
+export enum KnownHybridCountAndFacetMode {
+  /** Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. */
+  CountRetrievableResults = "countRetrievableResults",
+  /** Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window. */
+  CountAllResults = "countAllResults",
+}
+
+/**
+ * Determines whether the count and facets should include all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. The default value is 'countAllResults'. \
+ * {@link KnownHybridCountAndFacetMode} can be used interchangeably with HybridCountAndFacetMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **countRetrievableResults**: Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. \
+ * **countAllResults**: Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window.
+ */
+export type HybridCountAndFacetMode = string;
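+
+// A hedged usage sketch (illustrative only): tuning the text-recall window of a
+// hybrid query; 500 is an arbitrary value within the documented 1 to 10,000 range.
+const hybridOptions: HybridSearch = {
+  maxTextRecallSize: 500,
+  countAndFacetMode: KnownHybridCountAndFacetMode.CountAllResults,
+};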
+
+export function searchResultArraySerializer(result: Array<SearchResult>): any[] {
+  return result.map((item) => {
+    return searchResultSerializer(item);
+  });
+}
+
+export function searchResultArrayDeserializer(result: Array<SearchResult>): any[] {
+  return result.map((item) => {
+    return searchResultDeserializer(item);
+  });
+}
+
+/** Contains a document found by a search query, plus associated metadata. */
+export interface SearchResult {
+  /** The relevance score of the document compared to other documents returned by the query. */
+  score: number;
+  /** The relevance score computed by the semantic ranker for the top search results. Search results are sorted by the RerankerScore first and then by the Score. RerankerScore is only returned for queries of type 'semantic'. */
+  rerankerScore?: number;
+  /** The relevance score computed by boosting the Reranker Score. Search results are sorted by the RerankerScore/RerankerBoostedScore based on useScoringProfileBoostedRanking in the Semantic Config. RerankerBoostedScore is only returned for queries of type 'semantic'. */
+  rerankerBoostedScore?: number;
+  /** Text fragments from the document that indicate the matching search terms, organized by each applicable field; null if hit highlighting was not enabled for the query. */
+  highlights?: Record<string, string[]>;
+  /** Captions are the most representative passages from the document relative to the search query. They are often used as a document summary. Captions are only returned for queries of type 'semantic'. */
+  captions?: QueryCaptionResult[];
+  /** Contains debugging information that can be used to further explore your search results. */
+  readonly documentDebugInfo?: DocumentDebugInfo[];
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function searchResultSerializer(item: SearchResult): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    "@search.score": item["score"],
+    "@search.rerankerScore": item["rerankerScore"],
+    "@search.rerankerBoostedScore": item["rerankerBoostedScore"],
+    "@search.highlights": item["highlights"],
+    "@search.captions": !item["captions"]
+      ? item["captions"]
+      : queryCaptionResultArraySerializer(item["captions"]),
+  };
+}
+
+export function searchResultDeserializer(item: any): SearchResult {
+  return {
+    additionalProperties: serializeRecord(item, [
+      "score",
+      "rerankerScore",
+      "rerankerBoostedScore",
+      "highlights",
+      "captions",
+      "documentDebugInfo",
+    ]),
+    score: item["@search.score"],
+    rerankerScore: item["@search.rerankerScore"],
+    rerankerBoostedScore: item["@search.rerankerBoostedScore"],
+    highlights: item["@search.highlights"],
+    captions: !item["@search.captions"]
+      ? item["@search.captions"]
+      : queryCaptionResultArrayDeserializer(item["@search.captions"]),
+    documentDebugInfo: !item["@search.documentDebugInfo"]
+      ? item["@search.documentDebugInfo"]
+      : documentDebugInfoArrayDeserializer(item["@search.documentDebugInfo"]),
+  };
+}
+
+export function queryCaptionResultArraySerializer(result: Array<QueryCaptionResult>): any[] {
+  return result.map((item) => {
+    return queryCaptionResultSerializer(item);
+  });
+}
+
+export function queryCaptionResultArrayDeserializer(result: Array<QueryCaptionResult>): any[] {
+  return result.map((item) => {
+    return queryCaptionResultDeserializer(item);
+  });
+}
+
+/** Captions are the most representative passages from the document relative to the search query. They are often used as a document summary. Captions are only returned for queries of type `semantic`. */
+export interface QueryCaptionResult {
+  /** A representative text passage extracted from the document most relevant to the search query. */
+  text?: string;
+  /** Same text passage as in the Text property with highlighted phrases most relevant to the query. */
+  highlights?: string;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function queryCaptionResultSerializer(item: QueryCaptionResult): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    text: item["text"],
+    highlights: item["highlights"],
+  };
+}
+
+export function queryCaptionResultDeserializer(item: any): QueryCaptionResult {
+  return {
+    additionalProperties: serializeRecord(item, ["text", "highlights"]),
+    text: item["text"],
+    highlights: item["highlights"],
+  };
+}
+
+export function documentDebugInfoArrayDeserializer(result: Array<DocumentDebugInfo>): any[] {
+  return result.map((item) => {
+    return documentDebugInfoDeserializer(item);
+  });
+}
+
+/** Contains debugging information that can be used to further explore your search results. */
+export interface DocumentDebugInfo {
+  /** Contains debugging information specific to semantic ranking requests. */
+  readonly semantic?: SemanticDebugInfo;
+  /** Contains debugging information specific to vector and hybrid search. */
+  readonly vectors?: VectorsDebugInfo;
+  /** Contains debugging information specific to vectors matched within a collection of complex types. */
+  readonly innerHits?: Record<string, QueryResultDocumentInnerHit[]>;
+}
+
+export function documentDebugInfoDeserializer(item: any): DocumentDebugInfo {
+  return {
+    semantic: !item["semantic"]
+      ? item["semantic"]
+      : semanticDebugInfoDeserializer(item["semantic"]),
+    vectors: !item["vectors"] ? item["vectors"] : vectorsDebugInfoDeserializer(item["vectors"]),
+    innerHits: !item["innerHits"]
+      ? item["innerHits"]
+      : queryResultDocumentInnerHitArrayRecordDeserializer(item["innerHits"]),
+  };
+}
+
+/** Contains debugging information specific to semantic ranking requests. */
+export interface SemanticDebugInfo {
+  /** The title field that was sent to the semantic enrichment process, as well as how it was used */
+  readonly titleField?: QueryResultDocumentSemanticField;
+  /** The content fields that were sent to the semantic enrichment process, as well as how they were used */
+  readonly contentFields?: QueryResultDocumentSemanticField[];
+  /** The keyword fields that were sent to the semantic enrichment process, as well as how they were used */
+  readonly keywordFields?: QueryResultDocumentSemanticField[];
+  /** The raw concatenated strings that were sent to the semantic enrichment process. */
+  readonly rerankerInput?: QueryResultDocumentRerankerInput;
+}
+
+export function semanticDebugInfoDeserializer(item: any): SemanticDebugInfo {
+  return {
+    titleField: !item["titleField"]
+      ? item["titleField"]
+      : queryResultDocumentSemanticFieldDeserializer(item["titleField"]),
+    contentFields: !item["contentFields"]
+      ? item["contentFields"]
+      : queryResultDocumentSemanticFieldArrayDeserializer(item["contentFields"]),
+    keywordFields: !item["keywordFields"]
+      ? item["keywordFields"]
+      : queryResultDocumentSemanticFieldArrayDeserializer(item["keywordFields"]),
+    rerankerInput: !item["rerankerInput"]
+      ? item["rerankerInput"]
+      : queryResultDocumentRerankerInputDeserializer(item["rerankerInput"]),
+  };
+}
+
+/** Description of fields that were sent to the semantic enrichment process, as well as how they were used */
+export interface QueryResultDocumentSemanticField {
+  /** The name of the field that was sent to the semantic enrichment process */
+  readonly name?: string;
+  /** The way the field was used for the semantic enrichment process (fully used, partially used, or unused) */
+  readonly state?: SemanticFieldState;
+}
+
+export function queryResultDocumentSemanticFieldDeserializer(
+  item: any,
+): QueryResultDocumentSemanticField {
+  return {
+    name: item["name"],
+    state: item["state"],
+  };
+}
+
+/** The way the field was used for the semantic enrichment process. */
+export enum KnownSemanticFieldState {
+  /** The field was fully used for semantic enrichment. */
+  Used = "used",
+  /** The field was not used for semantic enrichment. */
+  Unused = "unused",
+  /** The field was partially used for semantic enrichment. */
+  Partial = "partial",
+}
+
+/**
+ * The way the field was used for the semantic enrichment process. \
+ * {@link KnownSemanticFieldState} can be used interchangeably with SemanticFieldState,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **used**: The field was fully used for semantic enrichment. \
+ * **unused**: The field was not used for semantic enrichment. \
+ * **partial**: The field was partially used for semantic enrichment.
+ */
+export type SemanticFieldState = string;
+
+export function queryResultDocumentSemanticFieldArrayDeserializer(
+  result: Array<QueryResultDocumentSemanticField>,
+): any[] {
+  return result.map((item) => {
+    return queryResultDocumentSemanticFieldDeserializer(item);
+  });
+}
+
+/** The raw concatenated strings that were sent to the semantic enrichment process. */
+export interface QueryResultDocumentRerankerInput {
+  /** The raw string for the title field that was used for semantic enrichment. */
+  readonly title?: string;
+  /** The raw concatenated strings for the content fields that were used for semantic enrichment. */
+  readonly content?: string;
+  /** The raw concatenated strings for the keyword fields that were used for semantic enrichment. */
+  readonly keywords?: string;
+}
+
+export function queryResultDocumentRerankerInputDeserializer(
+  item: any,
+): QueryResultDocumentRerankerInput {
+  return {
+    title: item["title"],
+    content: item["content"],
+    keywords: item["keywords"],
+  };
+}
+
+/** Contains debugging information specific to vector and hybrid search. */
+export interface VectorsDebugInfo {
+  /** The breakdown of subscores of the document prior to the chosen result set fusion/combination method such as RRF. */
+  readonly subscores?: QueryResultDocumentSubscores;
+}
+
+export function vectorsDebugInfoDeserializer(item: any): VectorsDebugInfo {
+  return {
+    subscores: !item["subscores"]
+      ? item["subscores"]
+      : queryResultDocumentSubscoresDeserializer(item["subscores"]),
+  };
+}
+
+/** The breakdown of subscores between the text and vector query components of the search query for this document. Each vector query is shown as a separate object in the same order they were received. */
+export interface QueryResultDocumentSubscores {
+  /** The BM25 or Classic score for the text portion of the query. */
+  readonly text?: TextResult;
+  /** The vector similarity and @search.score values for each vector query. */
+  readonly vectors?: Record<string, SingleVectorFieldResult>[];
+  /** The boost applied to the document's score by a scoring profile. */
+  readonly documentBoost?: number;
+}
+
+export function queryResultDocumentSubscoresDeserializer(item: any): QueryResultDocumentSubscores {
+  return {
+    text: !item["text"] ? item["text"] : textResultDeserializer(item["text"]),
+    vectors: !item["vectors"]
+      ? item["vectors"]
+      : singleVectorFieldResultRecordArrayDeserializer(item["vectors"]),
+    documentBoost: item["documentBoost"],
+  };
+}
+
+/** The BM25 or Classic score for the text portion of the query. */
+export interface TextResult {
+  /** The BM25 or Classic score for the text portion of the query. */
+  readonly searchScore?: number;
+}
+
+export function textResultDeserializer(item: any): TextResult {
+  return {
+    searchScore: item["searchScore"],
+  };
+}
+
+export function singleVectorFieldResultRecordArrayDeserializer(
+  result: Array<Record<string, SingleVectorFieldResult>>,
+): any[] {
+  return result.map((item) => {
+    return singleVectorFieldResultRecordDeserializer(item);
+  });
+}
+
+export function singleVectorFieldResultRecordDeserializer(
+  item: Record<string, any>,
+): Record<string, SingleVectorFieldResult> {
+  const result: Record<string, any> = {};
+  Object.keys(item).map((key) => {
+    result[key] = !item[key] ? item[key] : singleVectorFieldResultDeserializer(item[key]);
+  });
+  return result;
+}
+
+/** A single vector field result. Both @search.score and vector similarity values are returned. Vector similarity is related to @search.score by an equation. */
+export interface SingleVectorFieldResult {
+  /** The @search.score value that is calculated from the vector similarity score. This is the score that's visible in a pure single-field single-vector query. */
+  readonly searchScore?: number;
+  /** The vector similarity score for this document. Note this is the canonical definition of similarity metric, not the 'distance' version. For example, cosine similarity instead of cosine distance. */
+  readonly vectorSimilarity?: number;
+}
+
+export function singleVectorFieldResultDeserializer(item: any): SingleVectorFieldResult {
+  return {
+    searchScore: item["searchScore"],
+    vectorSimilarity: item["vectorSimilarity"],
+  };
+}
+
+export function queryResultDocumentInnerHitArrayRecordDeserializer(
+  item: Record<string, any>,
+): Record<string, Array<QueryResultDocumentInnerHit>> {
+  const result: Record<string, any> = {};
+  Object.keys(item).map((key) => {
+    result[key] = !item[key] ? item[key] : queryResultDocumentInnerHitArrayDeserializer(item[key]);
+  });
+  return result;
+}
+
+export function queryResultDocumentInnerHitArrayDeserializer(
+  result: Array<QueryResultDocumentInnerHit>,
+): any[] {
+  return result.map((item) => {
+    return queryResultDocumentInnerHitDeserializer(item);
+  });
+}
+
+/** Detailed scoring information for an individual element of a complex collection. */
+export interface QueryResultDocumentInnerHit {
+  /** Position of this specific matching element within its original collection. Position starts at 0. */
+  readonly ordinal?: number;
+  /** Detailed scoring information for an individual element of a complex collection that matched a vector query. */
+  readonly vectors?: Record<string, SingleVectorFieldResult>[];
+}
+
+export function queryResultDocumentInnerHitDeserializer(item: any): QueryResultDocumentInnerHit {
+  return {
+    ordinal: item["ordinal"],
+    vectors: !item["vectors"]
+      ? item["vectors"]
+      : singleVectorFieldResultRecordArrayDeserializer(item["vectors"]),
+  };
+}
+
+/** Reason that a partial response was returned for a semantic ranking request. */
+export enum KnownSemanticErrorReason {
+  /** If `semanticMaxWaitInMilliseconds` was set and the semantic processing duration exceeded that value. Only the base results were returned. */
+  MaxWaitExceeded = "maxWaitExceeded",
+  /** The request was throttled. Only the base results were returned. */
+  CapacityOverloaded = "capacityOverloaded",
+  /** At least one step of the semantic process failed. */
+  Transient = "transient",
+}
+
+/**
+ * Reason that a partial response was returned for a semantic ranking request. \
+ * {@link KnownSemanticErrorReason} can be used interchangeably with SemanticErrorReason,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **maxWaitExceeded**: If `semanticMaxWaitInMilliseconds` was set and the semantic processing duration exceeded that value. Only the base results were returned. \
+ * **capacityOverloaded**: The request was throttled. Only the base results were returned. \
+ * **transient**: At least one step of the semantic process failed.
+ */
+export type SemanticErrorReason = string;
+
+/** Type of partial response that was returned for a semantic ranking request. */
+export enum KnownSemanticSearchResultsType {
+  /** Results without any semantic enrichment or reranking. */
+  BaseResults = "baseResults",
+  /** Results have been reranked with the reranker model and will include semantic captions. They will not include any answers, answers highlights or caption highlights. */
+  RerankedResults = "rerankedResults",
+}
+
+/**
+ * Type of partial response that was returned for a semantic ranking request. \
+ * {@link KnownSemanticSearchResultsType} can be used interchangeably with SemanticSearchResultsType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **baseResults**: Results without any semantic enrichment or reranking. \
+ * **rerankedResults**: Results have been reranked with the reranker model and will include semantic captions. They will not include any answers, answers highlights or caption highlights.
+ */
+export type SemanticSearchResultsType = string;
+
+/** Type of query rewrite that was used for this request. */
+export enum KnownSemanticQueryRewritesResultType {
+  /** Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results. */
+  OriginalQueryOnly = "originalQueryOnly",
+}
+
+/**
+ * Type of query rewrite that was used for this request. \
+ * {@link KnownSemanticQueryRewritesResultType} can be used interchangeably with SemanticQueryRewritesResultType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **originalQueryOnly**: Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results.
+ */
+export type SemanticQueryRewritesResultType = string;
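+
+// A hedged helper sketch (illustrative only, not generated code): distinguishing
+// throttling from the other partial-response reasons defined above.
+function isThrottledReason(reason: SemanticErrorReason): boolean {
+  return reason === KnownSemanticErrorReason.CapacityOverloaded;
+}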
+
+/** A document retrieved via a document lookup operation. */
+export interface LookupDocument {
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function lookupDocumentDeserializer(item: any): LookupDocument {
+  return {
+    additionalProperties: serializeRecord(item, []),
+  };
+}
+
+/** Response containing suggestion query results from an index. */
+export interface SuggestDocumentsResult {
+  /** The sequence of results returned by the query. */
+  results: SuggestResult[];
+  /** A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not set in the request. */
+  coverage?: number;
+}
+
+export function suggestDocumentsResultDeserializer(item: any): SuggestDocumentsResult {
+  return {
+    results: suggestResultArrayDeserializer(item["value"]),
+    coverage: item["@search.coverage"],
+  };
+}
+
+export function suggestResultArraySerializer(result: Array<SuggestResult>): any[] {
+  return result.map((item) => {
+    return suggestResultSerializer(item);
+  });
+}
+
+export function suggestResultArrayDeserializer(result: Array<SuggestResult>): any[] {
+  return result.map((item) => {
+    return suggestResultDeserializer(item);
+  });
+}
+
+/** A result containing a document found by a suggestion query, plus associated metadata. */
+export interface SuggestResult {
+  /** The text of the suggestion result. */
+  text: string;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function suggestResultSerializer(item: SuggestResult): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    "@search.text": item["text"],
+  };
+}
+
+export function suggestResultDeserializer(item: any): SuggestResult {
+  return {
+    additionalProperties: serializeRecord(item, ["text"]),
+    text: item["@search.text"],
+  };
+}
+
+/** Contains a batch of document write actions to send to the index. */
+export interface IndexDocumentsBatch {
+  /** The actions in the batch. */
+  actions: IndexAction[];
+}
+
+export function indexDocumentsBatchSerializer(item: IndexDocumentsBatch): any {
+  return { value: indexActionArraySerializer(item["actions"]) };
+}
+
+export function indexActionArraySerializer(result: Array<IndexAction>): any[] {
+  return result.map((item) => {
+    return indexActionSerializer(item);
+  });
+}
+
+/** Represents an index action that operates on a document. */
+export interface IndexAction {
+  /** The operation to perform on a document in an indexing batch. */
+  actionType?: IndexActionType;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function indexActionSerializer(item: IndexAction): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    "@search.action": item["actionType"],
+  };
+}
+
+/** The operation to perform on a document in an indexing batch. */
+export enum KnownIndexActionType {
+  /** Inserts the document into the index if it is new and updates it if it exists. All fields are replaced in the update case. */
+  Upload = "upload",
+  /** Merges the specified field values with an existing document. If the document does not exist, the merge will fail. Any field you specify in a merge will replace the existing field in the document. This also applies to collections of primitive and complex types. */
+  Merge = "merge",
+  /** Behaves like merge if a document with the given key already exists in the index. If the document does not exist, it behaves like upload with a new document. */
+  MergeOrUpload = "mergeOrUpload",
+  Delete = "delete",
+}
+
+/**
+ * The operation to perform on a document in an indexing batch. \
+ * {@link KnownIndexActionType} can be used interchangeably with IndexActionType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **upload**: Inserts the document into the index if it is new and updates it if it exists. All fields are replaced in the update case. \
+ * **merge**: Merges the specified field values with an existing document. If the document does not exist, the merge will fail. Any field you specify in a merge will replace the existing field in the document. This also applies to collections of primitive and complex types. \
+ * **mergeOrUpload**: Behaves like merge if a document with the given key already exists in the index. If the document does not exist, it behaves like upload with a new document. \
+ * **delete**: Removes the specified document from the index. Any field you specify in a delete operation other than the key field will be ignored. If you want to remove an individual field from a document, use merge instead and set the field explicitly to null.
+ */
+export type IndexActionType = string;
+
+/** Response containing the status of operations for all documents in the indexing request. */
+export interface IndexDocumentsResult {
+  /** The list of status information for each document in the indexing request. */
+  results: IndexingResult[];
+}
+
+export function indexDocumentsResultDeserializer(item: any): IndexDocumentsResult {
+  return {
+    results: indexingResultArrayDeserializer(item["value"]),
+  };
+}
+
+export function indexingResultArraySerializer(result: Array<IndexingResult>): any[] {
+  return result.map((item) => {
+    return indexingResultSerializer(item);
+  });
+}
+
+export function indexingResultArrayDeserializer(result: Array<IndexingResult>): any[] {
+  return result.map((item) => {
+    return indexingResultDeserializer(item);
+  });
+}
+
+/** Status of an indexing operation for a single document. */
+export interface IndexingResult {
+  /** The key of a document that was in the indexing request. */
+  key: string;
+  /** The error message explaining why the indexing operation failed for the document identified by the key; null if indexing succeeded. */
+  errorMessage?: string;
+  /** A value indicating whether the indexing operation succeeded for the document identified by the key. */
+  succeeded: boolean;
+  /** The status code of the indexing operation. Possible values include: 200 for a successful update or delete, 201 for successful document creation, 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy. */
+  statusCode: number;
+}
+
+export function indexingResultSerializer(item: IndexingResult): any {
+  return {
+    key: item["key"],
+    errorMessage: item["errorMessage"],
+    status: item["succeeded"],
+    statusCode: item["statusCode"],
+  };
+}
+
+export function indexingResultDeserializer(item: any): IndexingResult {
+  return {
+    key: item["key"],
+    errorMessage: item["errorMessage"],
+    succeeded: item["status"],
+    statusCode: item["statusCode"],
+  };
+}
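To make these shapes concrete, a hedged sketch of a round trip: document fields are invented here and travel in `additionalProperties` next to the `@search.action` discriminator, and a 207 Multi-Status response means some per-document `IndexingResult`s report failure:

```ts
import {
  IndexAction,
  IndexDocumentsBatch,
  IndexDocumentsResult,
  KnownIndexActionType,
} from "./models.js"; // path assumed

const actions: IndexAction[] = [
  {
    actionType: KnownIndexActionType.MergeOrUpload,
    additionalProperties: { hotelId: "1", rating: 4 }, // hypothetical index fields
  },
  { actionType: KnownIndexActionType.Delete, additionalProperties: { hotelId: "2" } },
];
const batch: IndexDocumentsBatch = { actions };

/** Keys whose per-document IndexingResult reports failure. */
function failedKeys(result: IndexDocumentsResult): string[] {
  return result.results.filter((r) => !r.succeeded).map((r) => r.key);
}
```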
+
+/** The result of Autocomplete query. */
+export interface AutocompleteResult {
+  /** A value indicating the percentage of the index that was considered by the autocomplete request, or null if minimumCoverage was not specified in the request. */
+  coverage?: number;
+  /** The list of returned Autocompleted items. */
+  results: AutocompleteItem[];
+}
+
+export function autocompleteResultDeserializer(item: any): AutocompleteResult {
+  return {
+    coverage: item["@search.coverage"],
+    results: autocompleteItemArrayDeserializer(item["value"]),
+  };
+}
+
+export function autocompleteItemArraySerializer(result: Array<AutocompleteItem>): any[] {
+  return result.map((item) => {
+    return autocompleteItemSerializer(item);
+  });
+}
+
+export function autocompleteItemArrayDeserializer(result: Array<AutocompleteItem>): any[] {
+  return result.map((item) => {
+    return autocompleteItemDeserializer(item);
+  });
+}
+
+/** The result of Autocomplete requests. */
+export interface AutocompleteItem {
+  /** The completed term. */
+  text: string;
+  /** The query along with the completed term. */
+  queryPlusText: string;
+}
+
+export function autocompleteItemSerializer(item: AutocompleteItem): any {
+  return { text: item["text"], queryPlusText: item["queryPlusText"] };
+}
+
+export function autocompleteItemDeserializer(item: any): AutocompleteItem {
+  return {
+    text: item["text"],
+    queryPlusText: item["queryPlusText"],
+  };
+}
+
+/** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context in producing autocomplete terms. */
+export enum KnownAutocompleteMode {
+  /** Only one term is suggested. If the query has two terms, only the last term is completed. For example, if the input is 'washington medic', the suggested terms could include 'medicaid', 'medicare', and 'medicine'. */
+  OneTerm = "oneTerm",
+  /** Matching two-term phrases in the index will be suggested. For example, if the input is 'medic', the suggested terms could include 'medicare coverage' and 'medical assistant'. */
+  TwoTerms = "twoTerms",
+  /** Completes the last term in a query with two or more terms, where the last two terms are a phrase that exists in the index. For example, if the input is 'washington medic', the suggested terms could include 'washington medicaid' and 'washington medical'. */
+  OneTermWithContext = "oneTermWithContext",
+}
+
+/**
+ * Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context in producing autocomplete terms. \
+ * {@link KnownAutocompleteMode} can be used interchangeably with AutocompleteMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **oneTerm**: Only one term is suggested. If the query has two terms, only the last term is completed. For example, if the input is 'washington medic', the suggested terms could include 'medicaid', 'medicare', and 'medicine'. \
+ * **twoTerms**: Matching two-term phrases in the index will be suggested. For example, if the input is 'medic', the suggested terms could include 'medicare coverage' and 'medical assistant'. \
+ * **oneTermWithContext**: Completes the last term in a query with two or more terms, where the last two terms are a phrase that exists in the index. For example, if the input is 'washington medic', the suggested terms could include 'washington medicaid' and 'washington medical'.
+ */
+export type AutocompleteMode = string;
diff --git a/sdk/search/search-documents/generated/models/index.ts b/sdk/search/search-documents/generated/models/index.ts
new file mode 100644
index 000000000000..7b17a4980ee4
--- /dev/null
+++ b/sdk/search/search-documents/generated/models/index.ts
@@ -0,0 +1,34 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+export {
+  IndexedSharePointKnowledgeSource,
+  IndexedSharePointKnowledgeSourceParameters,
+  KnowledgeSourceIngestionParameters,
+  AIServices,
+  KnownKnowledgeSourceIngestionPermissionOption,
+  KnowledgeSourceIngestionPermissionOption,
+  KnownKnowledgeSourceContentExtractionMode,
+  KnowledgeSourceContentExtractionMode,
+  IndexedOneLakeKnowledgeSource,
+  IndexedOneLakeKnowledgeSourceParameters,
+  WebKnowledgeSource,
+  WebKnowledgeSourceParameters,
+  WebKnowledgeSourceDomains,
+  WebKnowledgeSourceDomain,
+  RemoteSharePointKnowledgeSource,
+  RemoteSharePointKnowledgeSourceParameters,
+  ServiceIndexersRuntime,
+  IndexerRuntime,
+  KnownKnowledgeBaseActivityRecordType,
+  KnowledgeBaseActivityRecordType,
+  KnownKnowledgeBaseReferenceType,
+  KnowledgeBaseReferenceType,
+  KnowledgeSourceStatus,
+  KnownKnowledgeSourceSynchronizationStatus,
+  KnowledgeSourceSynchronizationStatus,
+  SynchronizationState,
+  CompletedSynchronizationState,
+  KnowledgeSourceStatistics,
+  KnownVersions,
+} from "./models.js";
diff --git a/sdk/search/search-documents/generated/models/models.ts b/sdk/search/search-documents/generated/models/models.ts
new file mode 100644
index 000000000000..b7e2111babe4
--- /dev/null
+++ b/sdk/search/search-documents/generated/models/models.ts
@@ -0,0 +1,820 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+import {
+  searchResourceEncryptionKeySerializer,
+  searchResourceEncryptionKeyDeserializer,
+  searchIndexerDataIdentityUnionSerializer,
+  searchIndexerDataIdentityUnionDeserializer,
+  SearchIndexerDataIdentityUnion,
+  KnowledgeSource,
+  IndexingSchedule,
+  indexingScheduleSerializer,
+  indexingScheduleDeserializer,
+  CreatedResources,
+  createdResourcesSerializer,
+  BlobIndexerDataToExtract,
+  BlobIndexerImageAction,
+  BlobIndexerParsingMode,
+  MarkdownHeaderDepth,
+  MarkdownParsingSubmode,
+  BlobIndexerPDFTextRotationAlgorithm,
+} from "./azure/search/documents/indexes/models.js";
+
+/**
+ * This file contains only generated model types and their (de)serializers.
+ * Disable the following rules for internal models with '_' prefix and deserializers which require 'any' for raw JSON input.
+ */
+/* eslint-disable @typescript-eslint/naming-convention */
+/* eslint-disable @typescript-eslint/explicit-module-boundary-types */
+/** Configuration for SharePoint knowledge source. */
+export interface IndexedSharePointKnowledgeSource extends KnowledgeSource {
+  kind: "indexedSharePoint";
+  /** The parameters for the knowledge source. */
+  indexedSharePointParameters: IndexedSharePointKnowledgeSourceParameters;
+}
+
+export function indexedSharePointKnowledgeSourceSerializer(
+  item: IndexedSharePointKnowledgeSource,
+): any {
+  return {
+    description: item["description"],
+    kind: item["kind"],
+    "@odata.etag": item["eTag"],
+    encryptionKey: !item["encryptionKey"]
item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + indexedSharePointParameters: indexedSharePointKnowledgeSourceParametersSerializer( + item["indexedSharePointParameters"], + ), + }; +} + +export function indexedSharePointKnowledgeSourceDeserializer( + item: any, +): IndexedSharePointKnowledgeSource { + return { + name: item["name"], + description: item["description"], + kind: item["kind"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + indexedSharePointParameters: indexedSharePointKnowledgeSourceParametersDeserializer( + item["indexedSharePointParameters"], + ), + }; +} + +/** Parameters for SharePoint knowledge source. */ +export interface IndexedSharePointKnowledgeSourceParameters { + /** An explicit identity to use for this knowledge source. */ + identity?: SearchIndexerDataIdentityUnion; + /** Key-based connection string or the ResourceId format if using a managed identity. */ + connectionString: string; + /** The name of the SharePoint container. */ + containerName: string; + /** Optional query to filter SharePoint content. */ + query?: string; + /** Optional ingestion parameters. */ + ingestionParameters?: KnowledgeSourceIngestionParameters; +} + +export function indexedSharePointKnowledgeSourceParametersSerializer( + item: IndexedSharePointKnowledgeSourceParameters, +): any { + return { + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + connectionString: item["connectionString"], + containerName: item["containerName"], + query: item["query"], + ingestionParameters: !item["ingestionParameters"] + ? item["ingestionParameters"] + : knowledgeSourceIngestionParametersSerializer(item["ingestionParameters"]), + }; +} + +export function indexedSharePointKnowledgeSourceParametersDeserializer( + item: any, +): IndexedSharePointKnowledgeSourceParameters { + return { + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + connectionString: item["connectionString"], + containerName: item["containerName"], + query: item["query"], + ingestionParameters: !item["ingestionParameters"] + ? item["ingestionParameters"] + : knowledgeSourceIngestionParametersDeserializer(item["ingestionParameters"]), + }; +} + +/** Consolidates all general ingestion settings for knowledge sources. */ +export interface KnowledgeSourceIngestionParameters { + /** The schedule for ingestion. */ + ingestionSchedule?: IndexingSchedule; + /** The AI Services configuration. */ + aiServices?: AIServices; + /** The maximum number of items to extract from the source. */ + maxItemsToExtract?: number; + /** The maximum size of the document to extract. */ + maxDocumentExtractionSize?: number; + /** The data to extract from the source. */ + dataToExtract?: BlobIndexerDataToExtract; + /** The action to take on images. */ + imageAction?: BlobIndexerImageAction; + /** The parsing mode to use. */ + parsingMode?: BlobIndexerParsingMode; + /** Whether to fail on unprocessable document. */ + failOnUnprocessableDocument?: boolean; + /** Whether to fail on unsupported content type. */ + failOnUnsupportedContentType?: boolean; + /** Indexed file name extensions. */ + indexedFileNameExtensions?: string[]; + /** Excluded file name extensions. */ + excludedFileNameExtensions?: string[]; + /** Whether to index storage metadata only for oversized documents. 
+  indexStorageMetadataOnlyForOversizedDocuments?: boolean;
+  /** Delimited text delimiter. */
+  delimitedTextDelimiter?: string;
+  /** Whether the first line contains headers. */
+  firstLineContainsHeaders?: boolean;
+  /** Delimited text headers. */
+  delimitedTextHeaders?: string;
+  /** The document root. */
+  documentRoot?: string;
+  /** The markdown header depth. */
+  markdownHeaderDepth?: MarkdownHeaderDepth;
+  /** The markdown parsing submode. */
+  markdownParsingSubmode?: MarkdownParsingSubmode;
+  /** The PDF text rotation algorithm. */
+  pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm;
+  /** Permission options for ingestion. */
+  ingestionPermissionOptions?: KnowledgeSourceIngestionPermissionOption[];
+  /** Whether to allow skillset to read file data. */
+  allowSkillsetToReadFileData?: boolean;
+  /** Optional content extraction mode. Default is 'minimal'. */
+  contentExtractionMode?: KnowledgeSourceContentExtractionMode;
+}
+
+export function knowledgeSourceIngestionParametersSerializer(
+  item: KnowledgeSourceIngestionParameters,
+): any {
+  return {
+    ingestionSchedule: !item["ingestionSchedule"]
+      ? item["ingestionSchedule"]
+      : indexingScheduleSerializer(item["ingestionSchedule"]),
+    aiServices: !item["aiServices"] ? item["aiServices"] : aiServicesSerializer(item["aiServices"]),
+    maxItemsToExtract: item["maxItemsToExtract"],
+    maxDocumentExtractionSize: item["maxDocumentExtractionSize"],
+    dataToExtract: item["dataToExtract"],
+    imageAction: item["imageAction"],
+    parsingMode: item["parsingMode"],
+    failOnUnprocessableDocument: item["failOnUnprocessableDocument"],
+    failOnUnsupportedContentType: item["failOnUnsupportedContentType"],
+    indexedFileNameExtensions: !item["indexedFileNameExtensions"]
+      ? item["indexedFileNameExtensions"]
+      : item["indexedFileNameExtensions"].map((p: any) => {
+          return p;
+        }),
+    excludedFileNameExtensions: !item["excludedFileNameExtensions"]
+      ? item["excludedFileNameExtensions"]
+      : item["excludedFileNameExtensions"].map((p: any) => {
+          return p;
+        }),
+    indexStorageMetadataOnlyForOversizedDocuments:
+      item["indexStorageMetadataOnlyForOversizedDocuments"],
+    delimitedTextDelimiter: item["delimitedTextDelimiter"],
+    firstLineContainsHeaders: item["firstLineContainsHeaders"],
+    delimitedTextHeaders: item["delimitedTextHeaders"],
+    documentRoot: item["documentRoot"],
+    markdownHeaderDepth: item["markdownHeaderDepth"],
+    markdownParsingSubmode: item["markdownParsingSubmode"],
+    pdfTextRotationAlgorithm: item["pdfTextRotationAlgorithm"],
+    ingestionPermissionOptions: !item["ingestionPermissionOptions"]
+      ? item["ingestionPermissionOptions"]
+      : item["ingestionPermissionOptions"].map((p: any) => {
+          return p;
+        }),
+    allowSkillsetToReadFileData: item["allowSkillsetToReadFileData"],
+    contentExtractionMode: item["contentExtractionMode"],
+  };
+}
+
+export function knowledgeSourceIngestionParametersDeserializer(
+  item: any,
+): KnowledgeSourceIngestionParameters {
+  return {
+    ingestionSchedule: !item["ingestionSchedule"]
+      ? item["ingestionSchedule"]
+      : indexingScheduleDeserializer(item["ingestionSchedule"]),
+    aiServices: !item["aiServices"]
item["aiServices"] + : aiServicesDeserializer(item["aiServices"]), + maxItemsToExtract: item["maxItemsToExtract"], + maxDocumentExtractionSize: item["maxDocumentExtractionSize"], + dataToExtract: item["dataToExtract"], + imageAction: item["imageAction"], + parsingMode: item["parsingMode"], + failOnUnprocessableDocument: item["failOnUnprocessableDocument"], + failOnUnsupportedContentType: item["failOnUnsupportedContentType"], + indexedFileNameExtensions: !item["indexedFileNameExtensions"] + ? item["indexedFileNameExtensions"] + : item["indexedFileNameExtensions"].map((p: any) => { + return p; + }), + excludedFileNameExtensions: !item["excludedFileNameExtensions"] + ? item["excludedFileNameExtensions"] + : item["excludedFileNameExtensions"].map((p: any) => { + return p; + }), + indexStorageMetadataOnlyForOversizedDocuments: + item["indexStorageMetadataOnlyForOversizedDocuments"], + delimitedTextDelimiter: item["delimitedTextDelimiter"], + firstLineContainsHeaders: item["firstLineContainsHeaders"], + delimitedTextHeaders: item["delimitedTextHeaders"], + documentRoot: item["documentRoot"], + markdownHeaderDepth: item["markdownHeaderDepth"], + markdownParsingSubmode: item["markdownParsingSubmode"], + pdfTextRotationAlgorithm: item["pdfTextRotationAlgorithm"], + ingestionPermissionOptions: !item["ingestionPermissionOptions"] + ? item["ingestionPermissionOptions"] + : item["ingestionPermissionOptions"].map((p: any) => { + return p; + }), + allowSkillsetToReadFileData: item["allowSkillsetToReadFileData"], + contentExtractionMode: item["contentExtractionMode"], + }; +} + +/** Parameters for AI Services. */ +export interface AIServices { + /** The URI of the AI Services endpoint. */ + uri: string; + /** The API key for accessing AI Services. */ + apiKey?: string; +} + +export function aiServicesSerializer(item: AIServices): any { + return { uri: item["uri"], apiKey: item["apiKey"] }; +} + +export function aiServicesDeserializer(item: any): AIServices { + return { + uri: item["uri"], + apiKey: item["apiKey"], + }; +} + +/** Permission types to ingest together with document content. */ +export enum KnownKnowledgeSourceIngestionPermissionOption { + /** Ingest explicit user identifiers alongside document content. */ + UserIds = "userIds", + /** Ingest group identifiers alongside document content. */ + GroupIds = "groupIds", + /** Ingest RBAC scope information alongside document content. */ + RbacScope = "rbacScope", +} + +/** + * Permission types to ingest together with document content. \ + * {@link KnownKnowledgeSourceIngestionPermissionOption} can be used interchangeably with KnowledgeSourceIngestionPermissionOption, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **userIds**: Ingest explicit user identifiers alongside document content. \ + * **groupIds**: Ingest group identifiers alongside document content. \ + * **rbacScope**: Ingest RBAC scope information alongside document content. + */ +export type KnowledgeSourceIngestionPermissionOption = string; + +/** Optional content extraction mode. Default is 'minimal'. */ +export enum KnownKnowledgeSourceContentExtractionMode { + /** Extracts only essential metadata while deferring most content processing. */ + Minimal = "minimal", + /** Performs the full default content extraction pipeline. */ + Standard = "standard", +} + +/** + * Optional content extraction mode. Default is 'minimal'. 
+ * {@link KnownKnowledgeSourceContentExtractionMode} can be used interchangeably with KnowledgeSourceContentExtractionMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **minimal**: Extracts only essential metadata while deferring most content processing. \
+ * **standard**: Performs the full default content extraction pipeline.
+ */
+export type KnowledgeSourceContentExtractionMode = string;
+
+/** Configuration for OneLake knowledge source. */
+export interface IndexedOneLakeKnowledgeSource extends KnowledgeSource {
+  kind: "indexedOneLake";
+  /** The parameters for the knowledge source. */
+  indexedOneLakeParameters: IndexedOneLakeKnowledgeSourceParameters;
+}
+
+export function indexedOneLakeKnowledgeSourceSerializer(item: IndexedOneLakeKnowledgeSource): any {
+  return {
+    description: item["description"],
+    kind: item["kind"],
+    "@odata.etag": item["eTag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeySerializer(item["encryptionKey"]),
+    indexedOneLakeParameters: indexedOneLakeKnowledgeSourceParametersSerializer(
+      item["indexedOneLakeParameters"],
+    ),
+  };
+}
+
+export function indexedOneLakeKnowledgeSourceDeserializer(
+  item: any,
+): IndexedOneLakeKnowledgeSource {
+  return {
+    name: item["name"],
+    description: item["description"],
+    kind: item["kind"],
+    eTag: item["@odata.etag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]),
+    indexedOneLakeParameters: indexedOneLakeKnowledgeSourceParametersDeserializer(
+      item["indexedOneLakeParameters"],
+    ),
+  };
+}
+
+/** Parameters for OneLake knowledge source. */
+export interface IndexedOneLakeKnowledgeSourceParameters {
+  /** The Fabric workspace ID. */
+  fabricWorkspaceId: string;
+  /** The lakehouse ID. */
+  lakehouseId: string;
+  /** Optional target path within the lakehouse. */
+  targetPath?: string;
+  /** Optional ingestion parameters. */
+  ingestionParameters?: KnowledgeSourceIngestionParameters;
+}
+
+export function indexedOneLakeKnowledgeSourceParametersSerializer(
+  item: IndexedOneLakeKnowledgeSourceParameters,
+): any {
+  return {
+    fabricWorkspaceId: item["fabricWorkspaceId"],
+    lakehouseId: item["lakehouseId"],
+    targetPath: item["targetPath"],
+    ingestionParameters: !item["ingestionParameters"]
+      ? item["ingestionParameters"]
+      : knowledgeSourceIngestionParametersSerializer(item["ingestionParameters"]),
+  };
+}
+
+export function indexedOneLakeKnowledgeSourceParametersDeserializer(
+  item: any,
+): IndexedOneLakeKnowledgeSourceParameters {
+  return {
+    fabricWorkspaceId: item["fabricWorkspaceId"],
+    lakehouseId: item["lakehouseId"],
+    targetPath: item["targetPath"],
+    ingestionParameters: !item["ingestionParameters"]
+      ? item["ingestionParameters"]
+      : knowledgeSourceIngestionParametersDeserializer(item["ingestionParameters"]),
+  };
+}
+
+/** Knowledge Source targeting web results. */
+export interface WebKnowledgeSource extends KnowledgeSource {
+  kind: "web";
+  /** The parameters for the web knowledge source. */
+  webParameters?: WebKnowledgeSourceParameters;
+}
+
+export function webKnowledgeSourceSerializer(item: WebKnowledgeSource): any {
+  return {
+    description: item["description"],
+    kind: item["kind"],
+    "@odata.etag": item["eTag"],
+    encryptionKey: !item["encryptionKey"]
item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + webParameters: !item["webParameters"] + ? item["webParameters"] + : webKnowledgeSourceParametersSerializer(item["webParameters"]), + }; +} + +export function webKnowledgeSourceDeserializer(item: any): WebKnowledgeSource { + return { + name: item["name"], + description: item["description"], + kind: item["kind"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + webParameters: !item["webParameters"] + ? item["webParameters"] + : webKnowledgeSourceParametersDeserializer(item["webParameters"]), + }; +} + +/** Parameters for web knowledge source. */ +export interface WebKnowledgeSourceParameters { + /** Domain allow/block configuration for web results. */ + domains?: WebKnowledgeSourceDomains; +} + +export function webKnowledgeSourceParametersSerializer(item: WebKnowledgeSourceParameters): any { + return { + domains: !item["domains"] + ? item["domains"] + : webKnowledgeSourceDomainsSerializer(item["domains"]), + }; +} + +export function webKnowledgeSourceParametersDeserializer(item: any): WebKnowledgeSourceParameters { + return { + domains: !item["domains"] + ? item["domains"] + : webKnowledgeSourceDomainsDeserializer(item["domains"]), + }; +} + +/** Domain allow/block configuration for web knowledge source. */ +export interface WebKnowledgeSourceDomains { + /** Domains that are allowed for web results. */ + allowedDomains?: WebKnowledgeSourceDomain[]; + /** Domains that are blocked from web results. */ + blockedDomains?: WebKnowledgeSourceDomain[]; +} + +export function webKnowledgeSourceDomainsSerializer(item: WebKnowledgeSourceDomains): any { + return { + allowedDomains: !item["allowedDomains"] + ? item["allowedDomains"] + : webKnowledgeSourceDomainArraySerializer(item["allowedDomains"]), + blockedDomains: !item["blockedDomains"] + ? item["blockedDomains"] + : webKnowledgeSourceDomainArraySerializer(item["blockedDomains"]), + }; +} + +export function webKnowledgeSourceDomainsDeserializer(item: any): WebKnowledgeSourceDomains { + return { + allowedDomains: !item["allowedDomains"] + ? item["allowedDomains"] + : webKnowledgeSourceDomainArrayDeserializer(item["allowedDomains"]), + blockedDomains: !item["blockedDomains"] + ? item["blockedDomains"] + : webKnowledgeSourceDomainArrayDeserializer(item["blockedDomains"]), + }; +} + +export function webKnowledgeSourceDomainArraySerializer( + result: Array, +): any[] { + return result.map((item) => { + return webKnowledgeSourceDomainSerializer(item); + }); +} + +export function webKnowledgeSourceDomainArrayDeserializer( + result: Array, +): any[] { + return result.map((item) => { + return webKnowledgeSourceDomainDeserializer(item); + }); +} + +/** Configuration for web knowledge source domain. */ +export interface WebKnowledgeSourceDomain { + /** The address of the domain. */ + address: string; + /** Whether or not to include subpages from this domain. */ + includeSubpages?: boolean; +} + +export function webKnowledgeSourceDomainSerializer(item: WebKnowledgeSourceDomain): any { + return { address: item["address"], includeSubpages: item["includeSubpages"] }; +} + +export function webKnowledgeSourceDomainDeserializer(item: any): WebKnowledgeSourceDomain { + return { + address: item["address"], + includeSubpages: item["includeSubpages"], + }; +} + +/** Configuration for remote SharePoint knowledge source. 
+export interface RemoteSharePointKnowledgeSource extends KnowledgeSource {
+  kind: "remoteSharePoint";
+  /** The parameters for the remote SharePoint knowledge source. */
+  remoteSharePointParameters: RemoteSharePointKnowledgeSourceParameters;
+}
+
+export function remoteSharePointKnowledgeSourceSerializer(
+  item: RemoteSharePointKnowledgeSource,
+): any {
+  return {
+    description: item["description"],
+    kind: item["kind"],
+    "@odata.etag": item["eTag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeySerializer(item["encryptionKey"]),
+    remoteSharePointParameters: remoteSharePointKnowledgeSourceParametersSerializer(
+      item["remoteSharePointParameters"],
+    ),
+  };
+}
+
+export function remoteSharePointKnowledgeSourceDeserializer(
+  item: any,
+): RemoteSharePointKnowledgeSource {
+  return {
+    name: item["name"],
+    description: item["description"],
+    kind: item["kind"],
+    eTag: item["@odata.etag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]),
+    remoteSharePointParameters: remoteSharePointKnowledgeSourceParametersDeserializer(
+      item["remoteSharePointParameters"],
+    ),
+  };
+}
+
+/** Parameters for remote SharePoint knowledge source. */
+export interface RemoteSharePointKnowledgeSourceParameters {
+  /** Keyword Query Language (KQL) expression with queryable SharePoint properties and attributes to scope the retrieval before the query runs. */
+  filterExpression?: string;
+  /** A list of metadata fields to be returned for each item in the response. Only retrievable metadata properties can be included in this list. By default, no metadata is returned. */
+  resourceMetadata?: string[];
+  /** Container ID for SharePoint Embedded connection. When this is null, it will use SharePoint Online. */
+  containerTypeId?: string;
+}
+
+export function remoteSharePointKnowledgeSourceParametersSerializer(
+  item: RemoteSharePointKnowledgeSourceParameters,
+): any {
+  return {
+    filterExpression: item["filterExpression"],
+    resourceMetadata: !item["resourceMetadata"]
+      ? item["resourceMetadata"]
+      : item["resourceMetadata"].map((p: any) => {
+          return p;
+        }),
+    containerTypeId: item["containerTypeId"],
+  };
+}
+
+export function remoteSharePointKnowledgeSourceParametersDeserializer(
+  item: any,
+): RemoteSharePointKnowledgeSourceParameters {
+  return {
+    filterExpression: item["filterExpression"],
+    resourceMetadata: !item["resourceMetadata"]
+      ? item["resourceMetadata"]
+      : item["resourceMetadata"].map((p: any) => {
+          return p;
+        }),
+    containerTypeId: item["containerTypeId"],
+  };
+}
+
+/** Represents service-level indexer runtime counters. */
+export interface ServiceIndexersRuntime {
+  /** Cumulative runtime of all indexers in the service from the beginningTime to endingTime, in seconds. */
+  usedSeconds: number;
+  /** Cumulative runtime remaining for all indexers in the service from the beginningTime to endingTime, in seconds. */
+  remainingSeconds?: number;
+  /** Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */
+  beginningTime: Date;
+  /** End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */
+  endingTime: Date;
+}
+
+export function serviceIndexersRuntimeDeserializer(item: any): ServiceIndexersRuntime {
+  return {
+    usedSeconds: item["usedSeconds"],
+    remainingSeconds: item["remainingSeconds"],
+    beginningTime: new Date(item["beginningTime"]),
+    endingTime: new Date(item["endingTime"]),
+  };
+}
+
+/** Represents the indexer's cumulative runtime consumption in the service. */
+export interface IndexerRuntime {
+  /** Cumulative runtime of the indexer from the beginningTime to endingTime, in seconds. */
+  usedSeconds: number;
+  /** Cumulative runtime remaining for all indexers in the service from the beginningTime to endingTime, in seconds. */
+  remainingSeconds?: number;
+  /** Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */
+  beginningTime: Date;
+  /** End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */
+  endingTime: Date;
+}
+
+export function indexerRuntimeDeserializer(item: any): IndexerRuntime {
+  return {
+    usedSeconds: item["usedSeconds"],
+    remainingSeconds: item["remainingSeconds"],
+    beginningTime: new Date(item["beginningTime"]),
+    endingTime: new Date(item["endingTime"]),
+  };
+}
+
+/** The type of activity record. */
+export enum KnownKnowledgeBaseActivityRecordType {
+  /** Search index retrieval activity. */
+  SearchIndex = "searchIndex",
+  /** Azure Blob retrieval activity. */
+  AzureBlob = "azureBlob",
+  /** Indexed SharePoint retrieval activity. */
+  IndexedSharePoint = "indexedSharePoint",
+  /** Indexed OneLake retrieval activity. */
+  IndexedOneLake = "indexedOneLake",
+  /** Web retrieval activity. */
+  Web = "web",
+  /** Remote SharePoint retrieval activity. */
+  RemoteSharePoint = "remoteSharePoint",
+  /** LLM query planning activity. */
+  ModelQueryPlanning = "modelQueryPlanning",
+  /** LLM answer synthesis activity. */
+  ModelAnswerSynthesis = "modelAnswerSynthesis",
+  /** Agentic reasoning activity. */
+  AgenticReasoning = "agenticReasoning",
+}
+
+/**
+ * The type of activity record. \
+ * {@link KnownKnowledgeBaseActivityRecordType} can be used interchangeably with KnowledgeBaseActivityRecordType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **searchIndex**: Search index retrieval activity. \
+ * **azureBlob**: Azure Blob retrieval activity. \
+ * **indexedSharePoint**: Indexed SharePoint retrieval activity. \
+ * **indexedOneLake**: Indexed OneLake retrieval activity. \
+ * **web**: Web retrieval activity. \
+ * **remoteSharePoint**: Remote SharePoint retrieval activity. \
+ * **modelQueryPlanning**: LLM query planning activity. \
+ * **modelAnswerSynthesis**: LLM answer synthesis activity. \
+ * **agenticReasoning**: Agentic reasoning activity.
+ */
+export type KnowledgeBaseActivityRecordType = string;
+
+/** The type of reference. */
+export enum KnownKnowledgeBaseReferenceType {
+  /** Search index document reference. */
+  SearchIndex = "searchIndex",
+  /** Azure Blob document reference. */
+  AzureBlob = "azureBlob",
+  /** Indexed SharePoint document reference. */
+  IndexedSharePoint = "indexedSharePoint",
+  /** Indexed OneLake document reference. */
+  IndexedOneLake = "indexedOneLake",
+  /** Web document reference. */
+  Web = "web",
+  /** Remote SharePoint document reference. */
+  RemoteSharePoint = "remoteSharePoint",
+}
+
+/**
+ * The type of reference. \
+ * {@link KnownKnowledgeBaseReferenceType} can be used interchangeably with KnowledgeBaseReferenceType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **searchIndex**: Search index document reference. \
+ * **azureBlob**: Azure Blob document reference. \
+ * **indexedSharePoint**: Indexed SharePoint document reference. \
+ * **indexedOneLake**: Indexed OneLake document reference. \
+ * **web**: Web document reference. \
+ * **remoteSharePoint**: Remote SharePoint document reference.
+ */
+export type KnowledgeBaseReferenceType = string;
+
+/** Represents the status and synchronization history of a knowledge source. */
+export interface KnowledgeSourceStatus {
+  /** The current synchronization status. */
+  synchronizationStatus?: KnowledgeSourceSynchronizationStatus;
+  /** The created resources. */
+  createdResources?: CreatedResources;
+  /** The current synchronization state. */
+  currentSynchronizationState?: SynchronizationState;
+  /** The last synchronization state. */
+  lastSynchronizationState?: CompletedSynchronizationState;
+  /** The statistics for the knowledge source. */
+  statistics?: KnowledgeSourceStatistics;
+}
+
+export function knowledgeSourceStatusSerializer(item: KnowledgeSourceStatus): any {
+  return {
+    synchronizationStatus: item["synchronizationStatus"],
+    createdResources: !item["createdResources"]
+      ? item["createdResources"]
+      : createdResourcesSerializer(item["createdResources"]),
+    currentSynchronizationState: !item["currentSynchronizationState"]
+      ? item["currentSynchronizationState"]
+      : synchronizationStateSerializer(item["currentSynchronizationState"]),
+    lastSynchronizationState: !item["lastSynchronizationState"]
+      ? item["lastSynchronizationState"]
+      : completedSynchronizationStateSerializer(item["lastSynchronizationState"]),
+    statistics: !item["statistics"]
+      ? item["statistics"]
+      : knowledgeSourceStatisticsSerializer(item["statistics"]),
+  };
+}
+
+/** The current synchronization status of the knowledge source. */
+export enum KnownKnowledgeSourceSynchronizationStatus {
+  /** The knowledge source is being provisioned. */
+  Creating = "creating",
+  /** The knowledge source is active and synchronization runs are occurring. */
+  Active = "active",
+  /** The knowledge source is being deleted. */
+  Deleting = "deleting",
+}
+
+/**
+ * The current synchronization status of the knowledge source. \
+ * {@link KnownKnowledgeSourceSynchronizationStatus} can be used interchangeably with KnowledgeSourceSynchronizationStatus,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **creating**: The knowledge source is being provisioned. \
+ * **active**: The knowledge source is active and synchronization runs are occurring. \
+ * **deleting**: The knowledge source is being deleted.
+ */
+export type KnowledgeSourceSynchronizationStatus = string;
+
+/** Represents the current state of an ongoing synchronization that spans multiple indexer runs. */
+export interface SynchronizationState {
+  /** The start time of the current synchronization. */
+  startTime: Date;
+  /** The number of item updates successfully processed in the current synchronization. */
+  itemsUpdatesProcessed: number;
+  /** The number of item updates that failed in the current synchronization. */
+  itemsUpdatesFailed: number;
+  /** The number of items skipped in the current synchronization. */
+  itemsSkipped: number;
+}
+
+export function synchronizationStateSerializer(item: SynchronizationState): any {
+  return {
+    startTime: item["startTime"].toISOString(),
+    itemsUpdatesProcessed: item["itemsUpdatesProcessed"],
+    itemsUpdatesFailed: item["itemsUpdatesFailed"],
+    itemsSkipped: item["itemsSkipped"],
+  };
+}
+
+/** Represents the completed state of the last synchronization. */
+export interface CompletedSynchronizationState {
+  /** The start time of the last completed synchronization. */
+  startTime: Date;
+  /** The end time of the last completed synchronization. */
+  endTime: Date;
+  /** The number of item updates successfully processed in the last synchronization. */
+  itemsUpdatesProcessed: number;
+  /** The number of item updates that failed in the last synchronization. */
+  itemsUpdatesFailed: number;
+  /** The number of items skipped in the last synchronization. */
+  itemsSkipped: number;
+}
+
+export function completedSynchronizationStateSerializer(item: CompletedSynchronizationState): any {
+  return {
+    startTime: item["startTime"].toISOString(),
+    endTime: item["endTime"].toISOString(),
+    itemsUpdatesProcessed: item["itemsUpdatesProcessed"],
+    itemsUpdatesFailed: item["itemsUpdatesFailed"],
+    itemsSkipped: item["itemsSkipped"],
+  };
+}
+
+/** Statistical information about knowledge source synchronization history. */
+export interface KnowledgeSourceStatistics {
+  /** Total number of synchronizations. */
+  totalSynchronization: number;
+  /** Average synchronization duration. */
+  averageSynchronizationDuration: string;
+  /** Average items processed per synchronization. */
+  averageItemsProcessedPerSynchronization: number;
+}
+
+export function knowledgeSourceStatisticsSerializer(item: KnowledgeSourceStatistics): any {
+  return {
+    totalSynchronization: item["totalSynchronization"],
+    averageSynchronizationDuration: item["averageSynchronizationDuration"],
+    averageItemsProcessedPerSynchronization: item["averageItemsProcessedPerSynchronization"],
+  };
+}
+
+/** The available API versions. */
+export enum KnownVersions {
+  /** The 2025-11-01-preview API version. */
+  V20251101Preview = "2025-11-01-preview",
+}
diff --git a/sdk/search/search-documents/generated/search/api/index.ts b/sdk/search/search-documents/generated/search/api/index.ts
new file mode 100644
index 000000000000..4cd0afe6f9ca
--- /dev/null
+++ b/sdk/search/search-documents/generated/search/api/index.ts
@@ -0,0 +1,26 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+export {
+  autocompletePost,
+  autocompleteGet,
+  index,
+  suggestPost,
+  suggestGet,
+  getDocument,
+  searchPost,
+  searchGet,
+  getDocumentCount,
+} from "./operations.js";
+export {
+  AutocompletePostOptionalParams,
+  AutocompleteGetOptionalParams,
+  IndexOptionalParams,
+  SuggestPostOptionalParams,
+  SuggestGetOptionalParams,
+  GetDocumentOptionalParams,
+  SearchPostOptionalParams,
+  SearchGetOptionalParams,
+  GetDocumentCountOptionalParams,
+} from "./options.js";
+export { createSearch, SearchContext, SearchClientOptionalParams } from "./searchContext.js";
diff --git a/sdk/search/search-documents/generated/search/api/operations.ts b/sdk/search/search-documents/generated/search/api/operations.ts
new file mode 100644
index 000000000000..618f592cb639
--- /dev/null
+++ b/sdk/search/search-documents/generated/search/api/operations.ts
@@ -0,0 +1,712 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
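Every operation in this file follows the same three-part pattern: an internal `_xxxSend` that expands the URL template and issues the request, an internal `_xxxDeserialize` that validates the status code and maps the wire shape back to the model, and a thin public function composing the two. A schematic of that contract (not generated code; only the types come from `@azure-rest/core-client`):

```ts
import type { PathUncheckedResponse, StreamableMethod } from "@azure-rest/core-client";

// Schematic only: the generated functions below inline this composition.
async function operationPattern<T>(
  send: () => StreamableMethod,
  deserialize: (result: PathUncheckedResponse) => Promise<T>,
): Promise<T> {
  const result = await send(); // StreamableMethod is awaitable and yields the raw response
  return deserialize(result); // checks the status, maps the body, or throws a RestError
}
```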
+
+import { SearchContext as Client } from "./index.js";
+import {
+  errorResponseDeserializer,
+  SearchDocumentsResult,
+  searchDocumentsResultDeserializer,
+  vectorQueryUnionArraySerializer,
+  hybridSearchSerializer,
+  LookupDocument,
+  lookupDocumentDeserializer,
+  SuggestDocumentsResult,
+  suggestDocumentsResultDeserializer,
+  IndexDocumentsBatch,
+  indexDocumentsBatchSerializer,
+  IndexDocumentsResult,
+  indexDocumentsResultDeserializer,
+  AutocompleteResult,
+  autocompleteResultDeserializer,
+} from "../../models/azure/search/documents/models.js";
+import { expandUrlTemplate } from "../../static-helpers/urlTemplate.js";
+import {
+  AutocompletePostOptionalParams,
+  AutocompleteGetOptionalParams,
+  IndexOptionalParams,
+  SuggestPostOptionalParams,
+  SuggestGetOptionalParams,
+  GetDocumentOptionalParams,
+  SearchPostOptionalParams,
+  SearchGetOptionalParams,
+  GetDocumentCountOptionalParams,
+} from "./options.js";
+import {
+  StreamableMethod,
+  PathUncheckedResponse,
+  createRestError,
+  operationOptionsToRequestParameters,
+} from "@azure-rest/core-client";
+
+export function _autocompletePostSend(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: AutocompletePostOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexes('{indexName}')/docs/search.post.autocomplete{?api%2Dversion}",
+    {
+      indexName: context.indexName,
+      "api%2Dversion": context.apiVersion,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).post({
+    ...operationOptionsToRequestParameters(options),
+    contentType: "application/json",
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...(options?.querySourceAuthorization !== undefined
+        ? {
+            "x-ms-query-source-authorization": options?.querySourceAuthorization,
+          }
+        : {}),
+      accept: "application/json",
+      ...options.requestOptions?.headers,
+    },
+    body: {
+      search: searchText,
+      autocompleteMode: options?.autocompleteMode,
+      filter: options?.filter,
+      fuzzy: options?.useFuzzyMatching,
+      highlightPostTag: options?.highlightPostTag,
+      highlightPreTag: options?.highlightPreTag,
+      minimumCoverage: options?.minimumCoverage,
+      searchFields: options?.searchFields,
+      suggesterName: suggesterName,
+      top: options?.top,
+    },
+  });
+}
+
+export async function _autocompletePostDeserialize(
+  result: PathUncheckedResponse,
+): Promise<AutocompleteResult> {
+  const expectedStatuses = ["200"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return autocompleteResultDeserializer(result.body);
+}
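Since `_autocompletePostDeserialize` (and every deserializer below) attaches the parsed service error to the thrown `RestError` via `error.details`, callers can report the service's code and message rather than just the HTTP status. A sketch, with the `details` shape assumed from the service's standard error envelope:

```ts
import { RestError } from "@azure/core-rest-pipeline";

function describeSearchError(e: unknown): string {
  if (e instanceof RestError) {
    // `details` is populated by errorResponseDeserializer before the throw above.
    const details = e.details as { error?: { code?: string; message?: string } } | undefined;
    return `HTTP ${e.statusCode}: ${details?.error?.code ?? "unknown"} ${details?.error?.message ?? ""}`;
  }
  return String(e);
}
```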
+
+/** Autocompletes incomplete query terms based on input text and matching terms in the index. */
+export async function autocompletePost(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: AutocompletePostOptionalParams = { requestOptions: {} },
+): Promise<AutocompleteResult> {
+  const result = await _autocompletePostSend(context, searchText, suggesterName, options);
+  return _autocompletePostDeserialize(result);
+}
+
+export function _autocompleteGetSend(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: AutocompleteGetOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexes('{indexName}')/docs/search.autocomplete{?api%2Dversion,search,suggesterName,autocompleteMode,%24filter,fuzzy,highlightPostTag,highlightPreTag,minimumCoverage,searchFields,%24top}",
+    {
+      indexName: context.indexName,
+      "api%2Dversion": context.apiVersion,
+      search: searchText,
+      suggesterName: suggesterName,
+      autocompleteMode: options?.autocompleteMode,
+      "%24filter": options?.filter,
+      fuzzy: options?.useFuzzyMatching,
+      highlightPostTag: options?.highlightPostTag,
+      highlightPreTag: options?.highlightPreTag,
+      minimumCoverage: options?.minimumCoverage,
+      searchFields: !options?.searchFields
+        ? options?.searchFields
+        : options?.searchFields.map((p: any) => {
+            return p;
+          }),
+      "%24top": options?.top,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).get({
+    ...operationOptionsToRequestParameters(options),
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...(options?.querySourceAuthorization !== undefined
+        ? {
+            "x-ms-query-source-authorization": options?.querySourceAuthorization,
+          }
+        : {}),
+      accept: "application/json",
+      ...options.requestOptions?.headers,
+    },
+  });
+}
+
+export async function _autocompleteGetDeserialize(
+  result: PathUncheckedResponse,
+): Promise<AutocompleteResult> {
+  const expectedStatuses = ["200"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return autocompleteResultDeserializer(result.body);
+}
+
+/** Autocompletes incomplete query terms based on input text and matching terms in the index. */
+export async function autocompleteGet(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: AutocompleteGetOptionalParams = { requestOptions: {} },
+): Promise<AutocompleteResult> {
+  const result = await _autocompleteGetSend(context, searchText, suggesterName, options);
+  return _autocompleteGetDeserialize(result);
+}
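A hypothetical call against these helpers, assuming a `SearchContext` built with the exported `createSearch` (endpoint, index, and suggester names are invented, and the parameter order is assumed from the `context.indexName`/`context.apiVersion` usage above):

```ts
import { AzureKeyCredential } from "@azure/core-auth";
import { createSearch } from "./searchContext.js";
import { autocompletePost } from "./operations.js";

// Hypothetical service details, for illustration only.
const context = createSearch(
  "https://example.search.windows.net",
  "hotels-index",
  new AzureKeyCredential("<api-key>"),
);

const completions = await autocompletePost(context, "washington medic", "sg", {
  autocompleteMode: "twoTerms",
  top: 3,
});
for (const item of completions.results) console.log(item.queryPlusText);
```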
{ + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: indexDocumentsBatchSerializer(batch), + }); +} + +export async function _indexDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "207"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return indexDocumentsResultDeserializer(result.body); +} + +/** Sends a batch of document write actions to the index. */ +export async function index( + context: Client, + batch: IndexDocumentsBatch, + options: IndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _indexSend(context, batch, options); + return _indexDeserialize(result); +} + +export function _suggestPostSend( + context: Client, + searchText: string, + suggesterName: string, + options: SuggestPostOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/docs/search.post.suggest{?api%2Dversion}", + { + indexName: context.indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: { + filter: options?.filter, + fuzzy: options?.useFuzzyMatching, + highlightPostTag: options?.highlightPostTag, + highlightPreTag: options?.highlightPreTag, + minimumCoverage: options?.minimumCoverage, + orderby: options?.orderBy, + search: searchText, + searchFields: options?.searchFields, + select: options?.select, + suggesterName: suggesterName, + top: options?.top, + }, + }); +} + +export async function _suggestPostDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return suggestDocumentsResultDeserializer(result.body); +} + +/** Suggests documents in the index that match the given partial query text. 
+
+/** Suggests documents in the index that match the given partial query text. */
+export async function suggestPost(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: SuggestPostOptionalParams = { requestOptions: {} },
+): Promise<SuggestDocumentsResult> {
+  const result = await _suggestPostSend(context, searchText, suggesterName, options);
+  return _suggestPostDeserialize(result);
+}
+
+export function _suggestGetSend(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: SuggestGetOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexes('{indexName}')/docs/search.suggest{?api%2Dversion,search,suggesterName,%24filter,fuzzy,highlightPostTag,highlightPreTag,minimumCoverage,%24orderby,searchFields,%24select,%24top}",
+    {
+      indexName: context.indexName,
+      "api%2Dversion": context.apiVersion,
+      search: searchText,
+      suggesterName: suggesterName,
+      "%24filter": options?.filter,
+      fuzzy: options?.useFuzzyMatching,
+      highlightPostTag: options?.highlightPostTag,
+      highlightPreTag: options?.highlightPreTag,
+      minimumCoverage: options?.minimumCoverage,
+      "%24orderby": !options?.orderBy
+        ? options?.orderBy
+        : options?.orderBy.map((p: any) => {
+            return p;
+          }),
+      searchFields: !options?.searchFields
+        ? options?.searchFields
+        : options?.searchFields.map((p: any) => {
+            return p;
+          }),
+      "%24select": !options?.select
+        ? options?.select
+        : options?.select.map((p: any) => {
+            return p;
+          }),
+      "%24top": options?.top,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).get({
+    ...operationOptionsToRequestParameters(options),
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...(options?.querySourceAuthorization !== undefined
+        ? {
+            "x-ms-query-source-authorization": options?.querySourceAuthorization,
+          }
+        : {}),
+      accept: "application/json",
+      ...options.requestOptions?.headers,
+    },
+  });
+}
+
+export async function _suggestGetDeserialize(
+  result: PathUncheckedResponse,
+): Promise<SuggestDocumentsResult> {
+  const expectedStatuses = ["200"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return suggestDocumentsResultDeserializer(result.body);
+}
+
+/** Suggests documents in the index that match the given partial query text. */
+export async function suggestGet(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: SuggestGetOptionalParams = { requestOptions: {} },
+): Promise<SuggestDocumentsResult> {
+  const result = await _suggestGetSend(context, searchText, suggesterName, options);
+  return _suggestGetDeserialize(result);
+}
+
+export function _getDocumentSend(
+  context: Client,
+  key: string,
+  options: GetDocumentOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexes('{indexName}')/docs('{key}'){?api%2Dversion,%24select}",
+    {
+      key: key,
+      indexName: context.indexName,
+      "api%2Dversion": context.apiVersion,
+      "%24select": !options?.selectedFields
+        ? options?.selectedFields
+        : options?.selectedFields.map((p: any) => {
+            return p;
+          }),
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).get({
+    ...operationOptionsToRequestParameters(options),
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...(options?.querySourceAuthorization !== undefined
{ + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getDocumentDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return lookupDocumentDeserializer(result.body); +} + +/** Retrieves a document from the index. */ +export async function getDocument( + context: Client, + key: string, + options: GetDocumentOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getDocumentSend(context, key, options); + return _getDocumentDeserialize(result); +} + +export function _searchPostSend( + context: Client, + options: SearchPostOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/docs/search.post.search{?api%2Dversion}", + { + indexName: context.indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: { + count: options?.includeTotalCount, + facets: !options?.facets + ? options?.facets + : options?.facets.map((p: any) => { + return p; + }), + filter: options?.filter, + highlight: options?.highlightFields, + highlightPostTag: options?.highlightPostTag, + highlightPreTag: options?.highlightPreTag, + minimumCoverage: options?.minimumCoverage, + orderby: options?.orderBy, + queryType: options?.queryType, + scoringStatistics: options?.scoringStatistics, + sessionId: options?.sessionId, + scoringParameters: !options?.scoringParameters + ? options?.scoringParameters + : options?.scoringParameters.map((p: any) => { + return p; + }), + scoringProfile: options?.scoringProfile, + debug: options?.debug, + search: options?.searchText, + searchFields: options?.searchFields, + searchMode: options?.searchMode, + queryLanguage: options?.queryLanguage, + speller: options?.querySpeller, + select: options?.select, + skip: options?.skip, + top: options?.top, + semanticConfiguration: options?.semanticConfigurationName, + semanticErrorHandling: options?.semanticErrorHandling, + semanticMaxWaitInMilliseconds: options?.semanticMaxWaitInMilliseconds, + semanticQuery: options?.semanticQuery, + answers: options?.answers, + captions: options?.captions, + queryRewrites: options?.queryRewrites, + semanticFields: options?.semanticFields, + vectorQueries: !options?.vectorQueries + ? options?.vectorQueries + : vectorQueryUnionArraySerializer(options?.vectorQueries), + vectorFilterMode: options?.vectorFilterMode, + hybridSearch: !options?.hybridSearch + ? 
+
+export function _searchPostSend(
+  context: Client,
+  options: SearchPostOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexes('{indexName}')/docs/search.post.search{?api%2Dversion}",
+    {
+      indexName: context.indexName,
+      "api%2Dversion": context.apiVersion,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).post({
+    ...operationOptionsToRequestParameters(options),
+    contentType: "application/json",
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...(options?.querySourceAuthorization !== undefined
+        ? {
+            "x-ms-query-source-authorization": options?.querySourceAuthorization,
+          }
+        : {}),
+      accept: "application/json",
+      ...options.requestOptions?.headers,
+    },
+    body: {
+      count: options?.includeTotalCount,
+      facets: !options?.facets
+        ? options?.facets
+        : options?.facets.map((p: any) => {
+            return p;
+          }),
+      filter: options?.filter,
+      highlight: options?.highlightFields,
+      highlightPostTag: options?.highlightPostTag,
+      highlightPreTag: options?.highlightPreTag,
+      minimumCoverage: options?.minimumCoverage,
+      orderby: options?.orderBy,
+      queryType: options?.queryType,
+      scoringStatistics: options?.scoringStatistics,
+      sessionId: options?.sessionId,
+      scoringParameters: !options?.scoringParameters
+        ? options?.scoringParameters
+        : options?.scoringParameters.map((p: any) => {
+            return p;
+          }),
+      scoringProfile: options?.scoringProfile,
+      debug: options?.debug,
+      search: options?.searchText,
+      searchFields: options?.searchFields,
+      searchMode: options?.searchMode,
+      queryLanguage: options?.queryLanguage,
+      speller: options?.querySpeller,
+      select: options?.select,
+      skip: options?.skip,
+      top: options?.top,
+      semanticConfiguration: options?.semanticConfigurationName,
+      semanticErrorHandling: options?.semanticErrorHandling,
+      semanticMaxWaitInMilliseconds: options?.semanticMaxWaitInMilliseconds,
+      semanticQuery: options?.semanticQuery,
+      answers: options?.answers,
+      captions: options?.captions,
+      queryRewrites: options?.queryRewrites,
+      semanticFields: options?.semanticFields,
+      vectorQueries: !options?.vectorQueries
+        ? options?.vectorQueries
+        : vectorQueryUnionArraySerializer(options?.vectorQueries),
+      vectorFilterMode: options?.vectorFilterMode,
+      hybridSearch: !options?.hybridSearch
+        ? options?.hybridSearch
+        : hybridSearchSerializer(options?.hybridSearch),
+    },
+  });
+}
+
+export async function _searchPostDeserialize(
+  result: PathUncheckedResponse,
+): Promise<SearchDocumentsResult> {
+  const expectedStatuses = ["200", "206"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return searchDocumentsResultDeserializer(result.body);
+}
+
+/** Searches for documents in the index. */
+export async function searchPost(
+  context: Client,
+  options: SearchPostOptionalParams = { requestOptions: {} },
+): Promise<SearchDocumentsResult> {
+  const result = await _searchPostSend(context, options);
+  return _searchPostDeserialize(result);
+}
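And a query sketch using only options whose body mapping appears above; `select` is written here as a comma-separated string, a type assumed by analogy with the POST autocomplete options rather than confirmed from `SearchPostOptionalParams`:

```ts
import { searchPost } from "./operations.js";

const page = await searchPost(context, {
  searchText: "wifi +pool",
  queryType: "simple",
  filter: "rating ge 4", // OData $filter expression
  select: "hotelId,hotelName,rating", // assumed comma-separated in the POST body
  top: 10,
  includeTotalCount: true, // serialized as `count` above
});
console.log(page); // SearchDocumentsResult
```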
{ + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _searchGetDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "206"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchDocumentsResultDeserializer(result.body); +} + +/** Searches for documents in the index. */ +export async function searchGet( + context: Client, + options: SearchGetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _searchGetSend(context, options); + return _searchGetDeserialize(result); +} + +export function _getDocumentCountSend( + context: Client, + options: GetDocumentCountOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/docs/$count{?api%2Dversion}", + { + indexName: context.indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "text/plain", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getDocumentCountDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return result.body; +} + +/** Queries the number of documents in the index. */ +export async function getDocumentCount( + context: Client, + options: GetDocumentCountOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getDocumentCountSend(context, options); + return _getDocumentCountDeserialize(result); +} diff --git a/sdk/search/search-documents/generated/search/api/options.ts b/sdk/search/search-documents/generated/search/api/options.ts new file mode 100644 index 000000000000..904a8e640d86 --- /dev/null +++ b/sdk/search/search-documents/generated/search/api/options.ts @@ -0,0 +1,288 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { + QueryType, + ScoringStatistics, + QueryDebugMode, + SearchMode, + QueryLanguage, + QuerySpellerType, + SemanticErrorMode, + QueryAnswerType, + QueryCaptionType, + QueryRewritesType, + VectorQueryUnion, + VectorFilterMode, + HybridSearch, + AutocompleteMode, +} from "../../models/azure/search/documents/models.js"; +import { OperationOptions } from "@azure-rest/core-client"; + +/** Optional parameters. */ +export interface AutocompletePostOptionalParams extends OperationOptions { + /** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. */ + autocompleteMode?: AutocompleteMode; + /** An OData expression that filters the documents used to produce completed terms for the Autocomplete result. 
*/ + filter?: string; + /** A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will autocomplete terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and consume more resources. */ + useFuzzyMatching?: boolean; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by an autocomplete query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ + minimumCoverage?: number; + /** The comma-separated list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. */ + searchFields?: string; + /** The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. */ + top?: number; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface AutocompleteGetOptionalParams extends OperationOptions { + /** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. */ + autocompleteMode?: AutocompleteMode; + /** An OData expression that filters the documents used to produce completed terms for the Autocomplete result. */ + filter?: string; + /** A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will find terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and consume more resources. */ + useFuzzyMatching?: boolean; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by an autocomplete query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ + minimumCoverage?: number; + /** The list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. */ + searchFields?: string[]; + /** The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. 
*/ + top?: number; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface IndexOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface SuggestPostOptionalParams extends OperationOptions { + /** An OData expression that filters the documents considered for suggestions. */ + filter?: string; + /** A value indicating whether to use fuzzy matching for the suggestion query. Default is false. When set to true, the query will find suggestions even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and consume more resources. */ + useFuzzyMatching?: boolean; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a suggestion query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ + minimumCoverage?: number; + /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string; + /** The comma-separated list of field names to search for the specified search text. Target fields must be included in the specified suggester. */ + searchFields?: string; + /** The comma-separated list of fields to retrieve. If unspecified, only the key field will be included in the results. */ + select?: string; + /** The number of suggestions to retrieve. This must be a value between 1 and 100. The default is 5. */ + top?: number; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface SuggestGetOptionalParams extends OperationOptions { + /** An OData expression that filters the documents considered for suggestions. 
*/ + filter?: string; + /** A value indicating whether to use fuzzy matching for the suggestions query. Default is false. When set to true, the query will find terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are slower and consume more resources. */ + useFuzzyMatching?: boolean; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a suggestions query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ + minimumCoverage?: number; + /** The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string[]; + /** The list of field names to search for the specified search text. Target fields must be included in the specified suggester. */ + searchFields?: string[]; + /** The list of fields to retrieve. If unspecified, only the key field will be included in the results. */ + select?: string[]; + /** The number of suggestions to retrieve. The value must be a number between 1 and 100. The default is 5. */ + top?: number; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface GetDocumentOptionalParams extends OperationOptions { + /** List of field names to retrieve for the document; Any field not retrieved will be missing from the returned document. */ + selectedFields?: string[]; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface SearchPostOptionalParams extends OperationOptions { + /** A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. */ + includeTotalCount?: boolean; + /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. */ + facets?: string[]; + /** The OData $filter expression to apply to the search query. 
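   * For example (an illustrative filter; the field names are assumed, not taken from any real index): "rating ge 4 and category eq 'Luxury'".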
*/ + filter?: string; + /** The comma-separated list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */ + highlightFields?: string; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */ + minimumCoverage?: number; + /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string; + /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */ + queryType?: QueryType; + /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. */ + scoringStatistics?: ScoringStatistics; + /** A value to be used to create a sticky session, which can help getting more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */ + sessionId?: string; + /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). */ + scoringParameters?: string[]; + /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ + scoringProfile?: string; + /** Enables a debugging tool that can be used to further explore your reranked results. */ + debug?: QueryDebugMode; + /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */ + searchText?: string; + /** The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ + searchFields?: string; + /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. 
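   * For example, with searchText "luxury hotel", searchMode "any" (the default) matches documents containing either term, while "all" requires both.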
*/ + searchMode?: SearchMode; + /** A value that specifies the language of the search query. */ + queryLanguage?: QueryLanguage; + /** A value that specifies the type of the speller to use to spell-correct individual search query terms. */ + querySpeller?: QuerySpellerType; + /** The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. */ + select?: string; + /** The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead. */ + skip?: number; + /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */ + top?: number; + /** The name of a semantic configuration that will be used when processing documents for queries of type semantic. */ + semanticConfigurationName?: string; + /** Allows the user to choose whether a semantic call should fail completely (default / current behavior), or to return partial results. */ + semanticErrorHandling?: SemanticErrorMode; + /** Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. */ + semanticMaxWaitInMilliseconds?: number; + /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. This is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */ + semanticQuery?: string; + /** A value that specifies whether answers should be returned as part of the search response. */ + answers?: QueryAnswerType; + /** A value that specifies whether captions should be returned as part of the search response. */ + captions?: QueryCaptionType; + /** A value that specifies whether query rewrites should be generated to augment the search query. */ + queryRewrites?: QueryRewritesType; + /** The comma-separated list of field names used for semantic ranking. */ + semanticFields?: string; + /** The query parameters for vector and hybrid search queries. */ + vectorQueries?: VectorQueryUnion[]; + /** Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter' for new indexes. */ + vectorFilterMode?: VectorFilterMode; + /** The query parameters to configure hybrid search behaviors. */ + hybridSearch?: HybridSearch; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface SearchGetOptionalParams extends OperationOptions { + /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */ + searchText?: string; + /** A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation.
*/ + includeTotalResultCount?: boolean; + /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. */ + facets?: string[]; + /** The OData $filter expression to apply to the search query. */ + filter?: string; + /** The list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */ + highlightFields?: string[]; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */ + minimumCoverage?: number; + /** The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, and desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no OrderBy is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string[]; + /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */ + queryType?: QueryType; + /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). */ + scoringParameters?: string[]; + /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ + scoringProfile?: string; + /** The list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ + searchFields?: string[]; + /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. */ + searchMode?: SearchMode; + /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. */ + scoringStatistics?: ScoringStatistics; + /** A value to be used to create a sticky session, which can help to get more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */ + sessionId?: string; + /** The list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. 
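   * For example: select: ["hotelId", "hotelName", "rating"] (hypothetical field names).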
*/ + select?: string[]; + /** The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use $skip due to this limitation, consider using $orderby on a totally-ordered key and $filter with a range query instead. */ + skip?: number; + /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */ + top?: number; + /** The name of the semantic configuration that lists which fields should be used for semantic ranking, captions, highlights, and answers */ + semanticConfiguration?: string; + /** Allows the user to choose whether a semantic call should fail completely, or to return partial results (default). */ + semanticErrorHandling?: SemanticErrorMode; + /** Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. */ + semanticMaxWaitInMilliseconds?: number; + /** This parameter is only valid if the query type is `semantic`. If set, the query returns answers extracted from key passages in the highest ranked documents. The number of answers returned can be configured by appending the pipe character `|` followed by the `count-` option after the answers parameter value, such as `extractive|count-3`. Default count is 1. The confidence threshold can be configured by appending the pipe character `|` followed by the `threshold-` option after the answers parameter value, such as `extractive|threshold-0.9`. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. */ + answers?: QueryAnswerType; + /** This parameter is only valid if the query type is `semantic`. If set, the query returns captions extracted from key passages in the highest ranked documents. When Captions is set to `extractive`, highlighting is enabled by default, and can be configured by appending the pipe character `|` followed by the `highlight-` option, such as `extractive|highlight-true`. Defaults to `None`. The maximum character length of captions can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. */ + captions?: QueryCaptionType; + /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. This is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */ + semanticQuery?: string; + /** When QueryRewrites is set to `generative`, the query terms are sent to a generative model which will produce 10 (default) rewrites to help increase the recall of the request. The requested count can be configured by appending the pipe character `|` followed by the `count-` option, such as `generative|count-3`. Defaults to `None`. This parameter is only valid if the query type is `semantic`. */ + queryRewrites?: QueryRewritesType; + /** Enables a debugging tool that can be used to further explore your search results. */ + debug?: QueryDebugMode; + /** The language of the query. */ + queryLanguage?: QueryLanguage; + /** Improve search recall by spell-correcting individual search query terms.
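   * For example, combining queryLanguage: "en-us" with speller: "lexicon" spell-corrects English query terms before matching (the speller requires queryLanguage to be set).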
*/ + speller?: QuerySpellerType; + /** The list of field names used for semantic ranking. */ + semanticFields?: string[]; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface GetDocumentCountOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} diff --git a/sdk/search/search-documents/generated/search/api/searchContext.ts b/sdk/search/search-documents/generated/search/api/searchContext.ts new file mode 100644 index 000000000000..fbcc99814371 --- /dev/null +++ b/sdk/search/search-documents/generated/search/api/searchContext.ts @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { logger } from "../../logger.js"; +import { KnownVersions } from "../../models/models.js"; +import { Client, ClientOptions, getClient } from "@azure-rest/core-client"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; + +export interface SearchContext extends Client { + /** The API version to use for this operation. */ + /** Known values of {@link KnownVersions} that the service accepts. */ + apiVersion: string; + /** The name of the index. */ + indexName: string; +} + +/** Optional parameters for the client. */ +export interface SearchClientOptionalParams extends ClientOptions { + /** The API version to use for this operation. */ + /** Known values of {@link KnownVersions} that the service accepts. */ + apiVersion?: string; +} + +export function createSearch( + endpointParam: string, + credential: KeyCredential | TokenCredential, + indexName: string, + options: SearchClientOptionalParams = {}, +): SearchContext { + const endpointUrl = options.endpoint ?? String(endpointParam); + const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix; + const userAgentInfo = `azsdk-js-search-documents/12.3.0-beta.1`; + const userAgentPrefix = prefixFromOptions + ? `${prefixFromOptions} azsdk-js-api ${userAgentInfo}` + : `azsdk-js-api ${userAgentInfo}`; + const { apiVersion: _, ...updatedOptions } = { + ...options, + userAgentOptions: { userAgentPrefix }, + loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info }, + credentials: { + scopes: options.credentials?.scopes ?? ["https://search.azure.com/.default"], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", + }, + }; + const clientContext = getClient(endpointUrl, credential, updatedOptions); + clientContext.pipeline.removePolicy({ name: "ApiVersionPolicy" }); + const apiVersion = options.apiVersion ?? "2025-11-01-preview"; + clientContext.pipeline.addPolicy({ + name: "ClientApiVersionPolicy", + sendRequest: (req, next) => { + // Use the apiVersion defined in request url directly + // Append one if there is no apiVersion and we have one at client options + const url = new URL(req.url); + if (!url.searchParams.get("api-version")) { + req.url = `${req.url}${ + Array.from(url.searchParams.keys()).length > 0 ? "&" : "?" 
+ }api-version=${apiVersion}`; + } + + return next(req); + }, + }); + return { ...clientContext, apiVersion, indexName } as SearchContext; +} diff --git a/sdk/search/search-documents/generated/search/index.ts b/sdk/search/search-documents/generated/search/index.ts new file mode 100644 index 000000000000..5c33e50cb38b --- /dev/null +++ b/sdk/search/search-documents/generated/search/index.ts @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { SearchClient } from "./searchClient.js"; +export { + AutocompletePostOptionalParams, + AutocompleteGetOptionalParams, + IndexOptionalParams, + SuggestPostOptionalParams, + SuggestGetOptionalParams, + GetDocumentOptionalParams, + SearchPostOptionalParams, + SearchGetOptionalParams, + GetDocumentCountOptionalParams, + SearchContext, + SearchClientOptionalParams, +} from "./api/index.js"; diff --git a/sdk/search/search-documents/generated/search/searchClient.ts b/sdk/search/search-documents/generated/search/searchClient.ts new file mode 100644 index 000000000000..e0d915db2cc6 --- /dev/null +++ b/sdk/search/search-documents/generated/search/searchClient.ts @@ -0,0 +1,134 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { createSearch, SearchContext, SearchClientOptionalParams } from "./api/index.js"; +import { + SearchDocumentsResult, + LookupDocument, + SuggestDocumentsResult, + IndexDocumentsBatch, + IndexDocumentsResult, + AutocompleteResult, +} from "../models/azure/search/documents/models.js"; +import { + autocompletePost, + autocompleteGet, + index, + suggestPost, + suggestGet, + getDocument, + searchPost, + searchGet, + getDocumentCount, +} from "./api/operations.js"; +import { + AutocompletePostOptionalParams, + AutocompleteGetOptionalParams, + IndexOptionalParams, + SuggestPostOptionalParams, + SuggestGetOptionalParams, + GetDocumentOptionalParams, + SearchPostOptionalParams, + SearchGetOptionalParams, + GetDocumentCountOptionalParams, +} from "./api/options.js"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; +import { Pipeline } from "@azure/core-rest-pipeline"; + +export { SearchClientOptionalParams } from "./api/searchContext.js"; + +export class SearchClient { + private _client: SearchContext; + /** The pipeline used by this client to make requests */ + public readonly pipeline: Pipeline; + + constructor( + endpointParam: string, + credential: KeyCredential | TokenCredential, + indexName: string, + options: SearchClientOptionalParams = {}, + ) { + const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix; + const userAgentPrefix = prefixFromOptions + ? `${prefixFromOptions} azsdk-js-client` + : `azsdk-js-client`; + this._client = createSearch(endpointParam, credential, indexName, { + ...options, + userAgentOptions: { userAgentPrefix }, + }); + this.pipeline = this._client.pipeline; + } + + /** Autocompletes incomplete query terms based on input text and matching terms in the index. */ + autocompletePost( + searchText: string, + suggesterName: string, + options: AutocompletePostOptionalParams = { requestOptions: {} }, + ): Promise { + return autocompletePost(this._client, searchText, suggesterName, options); + } + + /** Autocompletes incomplete query terms based on input text and matching terms in the index. 
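   *
   * A minimal sketch (assuming the index defines a suggester named "sg"; the name is a placeholder):
   * @example
   * const completions = await client.autocompleteGet("sea", "sg", { autocompleteMode: "twoTerms" });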
*/ + autocompleteGet( + searchText: string, + suggesterName: string, + options: AutocompleteGetOptionalParams = { requestOptions: {} }, + ): Promise<AutocompleteResult> { + return autocompleteGet(this._client, searchText, suggesterName, options); + } + + /** Sends a batch of document write actions to the index. */ + index( + batch: IndexDocumentsBatch, + options: IndexOptionalParams = { requestOptions: {} }, + ): Promise<IndexDocumentsResult> { + return index(this._client, batch, options); + } + + /** Suggests documents in the index that match the given partial query text. */ + suggestPost( + searchText: string, + suggesterName: string, + options: SuggestPostOptionalParams = { requestOptions: {} }, + ): Promise<SuggestDocumentsResult> { + return suggestPost(this._client, searchText, suggesterName, options); + } + + /** Suggests documents in the index that match the given partial query text. */ + suggestGet( + searchText: string, + suggesterName: string, + options: SuggestGetOptionalParams = { requestOptions: {} }, + ): Promise<SuggestDocumentsResult> { + return suggestGet(this._client, searchText, suggesterName, options); + } + + /** Retrieves a document from the index. */ + getDocument( + key: string, + options: GetDocumentOptionalParams = { requestOptions: {} }, + ): Promise<LookupDocument> { + return getDocument(this._client, key, options); + } + + /** Searches for documents in the index. */ + searchPost( + options: SearchPostOptionalParams = { requestOptions: {} }, + ): Promise<SearchDocumentsResult> { + return searchPost(this._client, options); + } + + /** Searches for documents in the index. */ + searchGet( + options: SearchGetOptionalParams = { requestOptions: {} }, + ): Promise<SearchDocumentsResult> { + return searchGet(this._client, options); + } + + /** Queries the number of documents in the index. */ + getDocumentCount( + options: GetDocumentCountOptionalParams = { requestOptions: {} }, + ): Promise<number> { + return getDocumentCount(this._client, options); + } +} diff --git a/sdk/search/search-documents/generated/searchIndex/api/index.ts b/sdk/search/search-documents/generated/searchIndex/api/index.ts new file mode 100644 index 000000000000..fc698a4544e2 --- /dev/null +++ b/sdk/search/search-documents/generated/searchIndex/api/index.ts @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License.
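// An illustrative end-to-end sketch of the SearchClient defined above; the endpoint,
// key, and index name are placeholders, and AzureKeyCredential comes from @azure/core-auth:
//
//   import { AzureKeyCredential } from "@azure/core-auth";
//   import { SearchClient } from "../../search/searchClient.js";
//
//   const client = new SearchClient(
//     "https://<service>.search.windows.net",
//     new AzureKeyCredential("<api-key>"),
//     "hotels",
//   );
//   const results = await client.searchPost({ searchText: "wifi", top: 5, includeTotalCount: true });
//   console.log(results.count); // approximate total; populated because includeTotalCount was set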
+ +export { + listIndexStatsSummary, + getServiceStatistics, + createKnowledgeSource, + listKnowledgeSources, + getKnowledgeSource, + deleteKnowledgeSource, + createOrUpdateKnowledgeSource, + createKnowledgeBase, + listKnowledgeBases, + getKnowledgeBase, + deleteKnowledgeBase, + createOrUpdateKnowledgeBase, + createAlias, + listAliases, + getAlias, + deleteAlias, + createOrUpdateAlias, + analyzeText, + getIndexStatistics, + createIndex, + listIndexes, + getIndex, + deleteIndex, + createOrUpdateIndex, + createSynonymMap, + getSynonymMaps, + getSynonymMap, + deleteSynonymMap, + createOrUpdateSynonymMap, +} from "./operations.js"; +export { + ListIndexStatsSummaryOptionalParams, + GetServiceStatisticsOptionalParams, + CreateKnowledgeSourceOptionalParams, + ListKnowledgeSourcesOptionalParams, + GetKnowledgeSourceOptionalParams, + DeleteKnowledgeSourceOptionalParams, + CreateOrUpdateKnowledgeSourceOptionalParams, + CreateKnowledgeBaseOptionalParams, + ListKnowledgeBasesOptionalParams, + GetKnowledgeBaseOptionalParams, + DeleteKnowledgeBaseOptionalParams, + CreateOrUpdateKnowledgeBaseOptionalParams, + CreateAliasOptionalParams, + ListAliasesOptionalParams, + GetAliasOptionalParams, + DeleteAliasOptionalParams, + CreateOrUpdateAliasOptionalParams, + AnalyzeTextOptionalParams, + GetIndexStatisticsOptionalParams, + CreateIndexOptionalParams, + ListIndexesOptionalParams, + GetIndexOptionalParams, + DeleteIndexOptionalParams, + CreateOrUpdateIndexOptionalParams, + CreateSynonymMapOptionalParams, + GetSynonymMapsOptionalParams, + GetSynonymMapOptionalParams, + DeleteSynonymMapOptionalParams, + CreateOrUpdateSynonymMapOptionalParams, +} from "./options.js"; +export { + createSearchIndex, + SearchIndexContext, + SearchIndexClientOptionalParams, +} from "./searchIndexContext.js"; diff --git a/sdk/search/search-documents/generated/searchIndex/api/operations.ts b/sdk/search/search-documents/generated/searchIndex/api/operations.ts new file mode 100644 index 000000000000..4debe3256e22 --- /dev/null +++ b/sdk/search/search-documents/generated/searchIndex/api/operations.ts @@ -0,0 +1,1612 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
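// Every operation in this file follows the same generated pattern: an internal
// `_<op>Send` that expands a URL template against the client context and issues the
// request, paired with an `_<op>Deserialize` that rejects unexpected status codes via
// createRestError and maps the wire body. Sketched in miniature (the "ping" operation
// is hypothetical, for illustration only):
//
//   export function _pingSend(context: Client): StreamableMethod {
//     return context.path("/ping").get();
//   }
//
//   export async function _pingDeserialize(result: PathUncheckedResponse): Promise<void> {
//     if (result.status !== "200") {
//       throw createRestError(result);
//     }
//   }
//
//   export async function ping(context: Client): Promise<void> {
//     return _pingDeserialize(await _pingSend(context));
//   }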
+ +import { SearchIndexContext as Client } from "./index.js"; +import { + SynonymMap, + synonymMapSerializer, + synonymMapDeserializer, + ListSynonymMapsResult, + listSynonymMapsResultDeserializer, + SearchIndex, + searchIndexSerializer, + searchIndexDeserializer, + _ListIndexesResult, + _listIndexesResultDeserializer, + GetIndexStatisticsResult, + getIndexStatisticsResultDeserializer, + AnalyzeTextOptions, + analyzeTextOptionsSerializer, + AnalyzeResult, + analyzeResultDeserializer, + SearchAlias, + searchAliasSerializer, + searchAliasDeserializer, + _ListAliasesResult, + _listAliasesResultDeserializer, + KnowledgeBase, + knowledgeBaseSerializer, + knowledgeBaseDeserializer, + _ListKnowledgeBasesResult, + _listKnowledgeBasesResultDeserializer, + knowledgeSourceUnionSerializer, + knowledgeSourceUnionDeserializer, + KnowledgeSourceUnion, + _ListKnowledgeSourcesResult, + _listKnowledgeSourcesResultDeserializer, + SearchServiceStatistics, + searchServiceStatisticsDeserializer, + _ListIndexStatsSummary, + _listIndexStatsSummaryDeserializer, + IndexStatisticsSummary, +} from "../../models/azure/search/documents/indexes/models.js"; +import { errorResponseDeserializer } from "../../models/azure/search/documents/models.js"; +import { + PagedAsyncIterableIterator, + buildPagedAsyncIterator, +} from "../../static-helpers/pagingHelpers.js"; +import { expandUrlTemplate } from "../../static-helpers/urlTemplate.js"; +import { + ListIndexStatsSummaryOptionalParams, + GetServiceStatisticsOptionalParams, + CreateKnowledgeSourceOptionalParams, + ListKnowledgeSourcesOptionalParams, + GetKnowledgeSourceOptionalParams, + DeleteKnowledgeSourceOptionalParams, + CreateOrUpdateKnowledgeSourceOptionalParams, + CreateKnowledgeBaseOptionalParams, + ListKnowledgeBasesOptionalParams, + GetKnowledgeBaseOptionalParams, + DeleteKnowledgeBaseOptionalParams, + CreateOrUpdateKnowledgeBaseOptionalParams, + CreateAliasOptionalParams, + ListAliasesOptionalParams, + GetAliasOptionalParams, + DeleteAliasOptionalParams, + CreateOrUpdateAliasOptionalParams, + AnalyzeTextOptionalParams, + GetIndexStatisticsOptionalParams, + CreateIndexOptionalParams, + ListIndexesOptionalParams, + GetIndexOptionalParams, + DeleteIndexOptionalParams, + CreateOrUpdateIndexOptionalParams, + CreateSynonymMapOptionalParams, + GetSynonymMapsOptionalParams, + GetSynonymMapOptionalParams, + DeleteSynonymMapOptionalParams, + CreateOrUpdateSynonymMapOptionalParams, +} from "./options.js"; +import { + StreamableMethod, + PathUncheckedResponse, + createRestError, + operationOptionsToRequestParameters, +} from "@azure-rest/core-client"; + +export function _listIndexStatsSummarySend( + context: Client, + options: ListIndexStatsSummaryOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexstats{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listIndexStatsSummaryDeserialize( + result: PathUncheckedResponse, +): Promise<_ListIndexStatsSummary> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listIndexStatsSummaryDeserializer(result.body); +} + +/** Retrieves a summary of statistics for all indexes in the search service. */ +export function listIndexStatsSummary( + context: Client, + options: ListIndexStatsSummaryOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listIndexStatsSummarySend(context, options), + _listIndexStatsSummaryDeserialize, + ["200"], + { itemName: "IndexesStatistics" }, + ); +} + +export function _getServiceStatisticsSend( + context: Client, + options: GetServiceStatisticsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/servicestats{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getServiceStatisticsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchServiceStatisticsDeserializer(result.body); +} + +/** Gets service level statistics for a search service. */ +export async function getServiceStatistics( + context: Client, + options: GetServiceStatisticsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getServiceStatisticsSend(context, options); + return _getServiceStatisticsDeserialize(result); +} + +export function _createKnowledgeSourceSend( + context: Client, + knowledgeSource: KnowledgeSourceUnion, + options: CreateKnowledgeSourceOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeSourceUnionSerializer(knowledgeSource), + }); +} + +export async function _createKnowledgeSourceDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeSourceUnionDeserializer(result.body); +} + +/** Creates a new knowledge source. 
*/ +export async function createKnowledgeSource( + context: Client, + knowledgeSource: KnowledgeSourceUnion, + options: CreateKnowledgeSourceOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createKnowledgeSourceSend(context, knowledgeSource, options); + return _createKnowledgeSourceDeserialize(result); +} + +export function _listKnowledgeSourcesSend( + context: Client, + options: ListKnowledgeSourcesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listKnowledgeSourcesDeserialize( + result: PathUncheckedResponse, +): Promise<_ListKnowledgeSourcesResult> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listKnowledgeSourcesResultDeserializer(result.body); +} + +/** Lists all knowledge sources available for a search service. */ +export function listKnowledgeSources( + context: Client, + options: ListKnowledgeSourcesOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listKnowledgeSourcesSend(context, options), + _listKnowledgeSourcesDeserialize, + ["200"], + { itemName: "value" }, + ); +} + +export function _getKnowledgeSourceSend( + context: Client, + sourceName: string, + options: GetKnowledgeSourceOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources('{sourceName}'){?api%2Dversion}", + { + sourceName: sourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getKnowledgeSourceDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeSourceUnionDeserializer(result.body); +} + +/** Retrieves a knowledge source definition. 
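 *
 * @example
 * // "docs-blob-source" is a placeholder name:
 * const source = await getKnowledgeSource(context, "docs-blob-source");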
*/ +export async function getKnowledgeSource( + context: Client, + sourceName: string, + options: GetKnowledgeSourceOptionalParams = { requestOptions: {} }, +): Promise<KnowledgeSourceUnion> { + const result = await _getKnowledgeSourceSend(context, sourceName, options); + return _getKnowledgeSourceDeserialize(result); +} + +export function _deleteKnowledgeSourceSend( + context: Client, + sourceName: string, + options: DeleteKnowledgeSourceOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources('{sourceName}'){?api%2Dversion}", + { + sourceName: sourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteKnowledgeSourceDeserialize( + result: PathUncheckedResponse, +): Promise<void> { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes an existing knowledge source. */ +export async function deleteKnowledgeSource( + context: Client, + sourceName: string, + options: DeleteKnowledgeSourceOptionalParams = { requestOptions: {} }, +): Promise<void> { + const result = await _deleteKnowledgeSourceSend(context, sourceName, options); + return _deleteKnowledgeSourceDeserialize(result); +} + +export function _createOrUpdateKnowledgeSourceSend( + context: Client, + knowledgeSource: KnowledgeSourceUnion, + sourceName: string, + options: CreateOrUpdateKnowledgeSourceOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources('{sourceName}'){?api%2Dversion}", + { + sourceName: sourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeSourceUnionSerializer(knowledgeSource), + }); +} + +export async function _createOrUpdateKnowledgeSourceDeserialize( + result: PathUncheckedResponse, +): Promise<KnowledgeSourceUnion> { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeSourceUnionDeserializer(result.body); +} + +/** Creates a new knowledge source or updates a knowledge source if it already exists.
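 *
 * A sketch of optimistic concurrency using the conditional headers wired up above
 * (assumes the model carries the resource's ETag, e.g. on an eTag property):
 * @example
 * // Only update if the definition is unchanged since it was read; the service
 * // responds with 412 otherwise.
 * const updated = await createOrUpdateKnowledgeSource(context, source, "docs-blob-source", {
 *   ifMatch: source.eTag,
 * });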
*/ +export async function createOrUpdateKnowledgeSource( + context: Client, + knowledgeSource: KnowledgeSourceUnion, + sourceName: string, + options: CreateOrUpdateKnowledgeSourceOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateKnowledgeSourceSend( + context, + knowledgeSource, + sourceName, + options, + ); + return _createOrUpdateKnowledgeSourceDeserialize(result); +} + +export function _createKnowledgeBaseSend( + context: Client, + knowledgeBase: KnowledgeBase, + options: CreateKnowledgeBaseOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeBaseSerializer(knowledgeBase), + }); +} + +export async function _createKnowledgeBaseDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeBaseDeserializer(result.body); +} + +/** Creates a new knowledge base. */ +export async function createKnowledgeBase( + context: Client, + knowledgeBase: KnowledgeBase, + options: CreateKnowledgeBaseOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createKnowledgeBaseSend(context, knowledgeBase, options); + return _createKnowledgeBaseDeserialize(result); +} + +export function _listKnowledgeBasesSend( + context: Client, + options: ListKnowledgeBasesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listKnowledgeBasesDeserialize( + result: PathUncheckedResponse, +): Promise<_ListKnowledgeBasesResult> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listKnowledgeBasesResultDeserializer(result.body); +} + +/** Lists all knowledge bases available for a search service. 
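 *
 * @example
 * // The returned PagedAsyncIterableIterator supports both element-wise iteration
 * // and page-wise iteration via .byPage():
 * for await (const kb of listKnowledgeBases(context)) {
 *   console.log(kb.name);
 * }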
*/ +export function listKnowledgeBases( + context: Client, + options: ListKnowledgeBasesOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listKnowledgeBasesSend(context, options), + _listKnowledgeBasesDeserialize, + ["200"], + { itemName: "value" }, + ); +} + +export function _getKnowledgeBaseSend( + context: Client, + knowledgeBaseName: string, + options: GetKnowledgeBaseOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases('{knowledgeBaseName}'){?api%2Dversion}", + { + knowledgeBaseName: knowledgeBaseName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getKnowledgeBaseDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeBaseDeserializer(result.body); +} + +/** Retrieves a knowledge base definition. */ +export async function getKnowledgeBase( + context: Client, + knowledgeBaseName: string, + options: GetKnowledgeBaseOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getKnowledgeBaseSend(context, knowledgeBaseName, options); + return _getKnowledgeBaseDeserialize(result); +} + +export function _deleteKnowledgeBaseSend( + context: Client, + knowledgeBaseName: string, + options: DeleteKnowledgeBaseOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases('{knowledgeBaseName}'){?api%2Dversion}", + { + knowledgeBaseName: knowledgeBaseName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteKnowledgeBaseDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a knowledge base. 
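 *
 * @example
 * // Note the "404" in expectedStatuses above: deleting an already-missing
 * // knowledge base resolves normally rather than throwing.
 * await deleteKnowledgeBase(context, "stale-kb");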
*/ +export async function deleteKnowledgeBase( + context: Client, + knowledgeBaseName: string, + options: DeleteKnowledgeBaseOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteKnowledgeBaseSend(context, knowledgeBaseName, options); + return _deleteKnowledgeBaseDeserialize(result); +} + +export function _createOrUpdateKnowledgeBaseSend( + context: Client, + knowledgeBase: KnowledgeBase, + knowledgeBaseName: string, + options: CreateOrUpdateKnowledgeBaseOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases('{knowledgeBaseName}'){?api%2Dversion}", + { + knowledgeBaseName: knowledgeBaseName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeBaseSerializer(knowledgeBase), + }); +} + +export async function _createOrUpdateKnowledgeBaseDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeBaseDeserializer(result.body); +} + +/** Creates a new knowledge base or updates a knowledge base if it already exists. */ +export async function createOrUpdateKnowledgeBase( + context: Client, + knowledgeBase: KnowledgeBase, + knowledgeBaseName: string, + options: CreateOrUpdateKnowledgeBaseOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateKnowledgeBaseSend( + context, + knowledgeBase, + knowledgeBaseName, + options, + ); + return _createOrUpdateKnowledgeBaseDeserialize(result); +} + +export function _createAliasSend( + context: Client, + alias: SearchAlias, + options: CreateAliasOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchAliasSerializer(alias), + }); +} + +export async function _createAliasDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchAliasDeserializer(result.body); +} + +/** Creates a new search alias. 
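+ *
+ * A hedged sketch (same `context` assumption; the alias and index names are placeholders, and the `SearchAlias` field shapes are assumed from the model): an alias maps one alias name onto an index.
+ * @example
+ * const alias = await createAlias(context, { name: "hotels", indexes: ["hotels-v2"] });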
*/ +export async function createAlias( + context: Client, + alias: SearchAlias, + options: CreateAliasOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createAliasSend(context, alias, options); + return _createAliasDeserialize(result); +} + +export function _listAliasesSend( + context: Client, + options: ListAliasesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listAliasesDeserialize( + result: PathUncheckedResponse, +): Promise<_ListAliasesResult> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listAliasesResultDeserializer(result.body); +} + +/** Lists all aliases available for a search service. */ +export function listAliases( + context: Client, + options: ListAliasesOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listAliasesSend(context, options), + _listAliasesDeserialize, + ["200"], + { itemName: "aliases" }, + ); +} + +export function _getAliasSend( + context: Client, + aliasName: string, + options: GetAliasOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases('{aliasName}'){?api%2Dversion}", + { + aliasName: aliasName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getAliasDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchAliasDeserializer(result.body); +} + +/** Retrieves an alias definition. */ +export async function getAlias( + context: Client, + aliasName: string, + options: GetAliasOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getAliasSend(context, aliasName, options); + return _getAliasDeserialize(result); +} + +export function _deleteAliasSend( + context: Client, + aliasName: string, + options: DeleteAliasOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases('{aliasName}'){?api%2Dversion}", + { + aliasName: aliasName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? 
{ "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteAliasDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a search alias and its associated mapping to an index. This operation is permanent, with no recovery option. The mapped index is untouched by this operation. */ +export async function deleteAlias( + context: Client, + aliasName: string, + options: DeleteAliasOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteAliasSend(context, aliasName, options); + return _deleteAliasDeserialize(result); +} + +export function _createOrUpdateAliasSend( + context: Client, + alias: SearchAlias, + aliasName: string, + options: CreateOrUpdateAliasOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases('{aliasName}'){?api%2Dversion}", + { + aliasName: aliasName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchAliasSerializer(alias), + }); +} + +export async function _createOrUpdateAliasDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchAliasDeserializer(result.body); +} + +/** Creates a new search alias or updates an alias if it already exists. */ +export async function createOrUpdateAlias( + context: Client, + alias: SearchAlias, + aliasName: string, + options: CreateOrUpdateAliasOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateAliasSend(context, alias, aliasName, options); + return _createOrUpdateAliasDeserialize(result); +} + +export function _analyzeTextSend( + context: Client, + request: AnalyzeTextOptions, + indexName: string, + options: AnalyzeTextOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/search.analyze{?api%2Dversion}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: analyzeTextOptionsSerializer(request), + }); +} + +export async function _analyzeTextDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return analyzeResultDeserializer(result.body); +} + +/** Shows how an analyzer breaks text into tokens. */ +export async function analyzeText( + context: Client, + request: AnalyzeTextOptions, + indexName: string, + options: AnalyzeTextOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _analyzeTextSend(context, request, indexName, options); + return _analyzeTextDeserialize(result); +} + +export function _getIndexStatisticsSend( + context: Client, + indexName: string, + options: GetIndexStatisticsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/search.stats{?api%2Dversion}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexStatisticsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return getIndexStatisticsResultDeserializer(result.body); +} + +/** Returns statistics for the given index, including a document count and storage usage. */ +export async function getIndexStatistics( + context: Client, + indexName: string, + options: GetIndexStatisticsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexStatisticsSend(context, indexName, options); + return _getIndexStatisticsDeserialize(result); +} + +export function _createIndexSend( + context: Client, + index: SearchIndex, + options: CreateIndexOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexSerializer(index), + }); +} + +export async function _createIndexDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexDeserializer(result.body); +} + +/** Creates a new search index. */ +export async function createIndex( + context: Client, + index: SearchIndex, + options: CreateIndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createIndexSend(context, index, options); + return _createIndexDeserialize(result); +} + +export function _listIndexesSend( + context: Client, + options: ListIndexesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listIndexesDeserialize( + result: PathUncheckedResponse, +): Promise<_ListIndexesResult> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listIndexesResultDeserializer(result.body); +} + +/** Lists all indexes available for a search service. */ +export function listIndexes( + context: Client, + options: ListIndexesOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listIndexesSend(context, options), + _listIndexesDeserialize, + ["200"], + { itemName: "indexes" }, + ); +} + +export function _getIndexSend( + context: Client, + indexName: string, + options: GetIndexOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}'){?api%2Dversion}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexDeserializer(result.body); +} + +/** Retrieves an index definition. 
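+ *
+ * A sketch under the same `context` assumption; the index name is a placeholder and the `fields` property follows the `SearchIndex` model.
+ * @example
+ * const index = await getIndex(context, "hotels-v2");
+ * console.log(index.fields.map((field) => field.name));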
*/ +export async function getIndex( + context: Client, + indexName: string, + options: GetIndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexSend(context, indexName, options); + return _getIndexDeserialize(result); +} + +export function _deleteIndexSend( + context: Client, + indexName: string, + options: DeleteIndexOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}'){?api%2Dversion}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteIndexDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a search index and all the documents it contains. This operation is permanent, with no recovery option. Make sure you have a master copy of your index definition, data ingestion code, and a backup of the primary data source in case you need to re-build the index. */ +export async function deleteIndex( + context: Client, + indexName: string, + options: DeleteIndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteIndexSend(context, indexName, options); + return _deleteIndexDeserialize(result); +} + +export function _createOrUpdateIndexSend( + context: Client, + index: SearchIndex, + indexName: string, + options: CreateOrUpdateIndexOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}'){?api%2Dversion,allowIndexDowntime}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + allowIndexDowntime: options?.allowIndexDowntime, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? 
{ + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexSerializer(index), + }); +} + +export async function _createOrUpdateIndexDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexDeserializer(result.body); +} + +/** Creates a new search index or updates an index if it already exists. */ +export async function createOrUpdateIndex( + context: Client, + index: SearchIndex, + indexName: string, + options: CreateOrUpdateIndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateIndexSend(context, index, indexName, options); + return _createOrUpdateIndexDeserialize(result); +} + +export function _createSynonymMapSend( + context: Client, + synonymMap: SynonymMap, + options: CreateSynonymMapOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: synonymMapSerializer(synonymMap), + }); +} + +export async function _createSynonymMapDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return synonymMapDeserializer(result.body); +} + +/** Creates a new synonym map. */ +export async function createSynonymMap( + context: Client, + synonymMap: SynonymMap, + options: CreateSynonymMapOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createSynonymMapSend(context, synonymMap, options); + return _createSynonymMapDeserialize(result); +} + +export function _getSynonymMapsSend( + context: Client, + options: GetSynonymMapsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getSynonymMapsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return listSynonymMapsResultDeserializer(result.body); +} + +/** Lists all synonym maps available for a search service. 
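+ *
+ * Unlike the paged listings above, this resolves to a single `ListSynonymMapsResult` in one call. A sketch under the same `context` assumption; `select` narrows the returned top-level properties.
+ * @example
+ * const maps = await getSynonymMaps(context, { select: "name" });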
*/ +export async function getSynonymMaps( + context: Client, + options: GetSynonymMapsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getSynonymMapsSend(context, options); + return _getSynonymMapsDeserialize(result); +} + +export function _getSynonymMapSend( + context: Client, + synonymMapName: string, + options: GetSynonymMapOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps('{synonymMapName}'){?api%2Dversion}", + { + synonymMapName: synonymMapName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getSynonymMapDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return synonymMapDeserializer(result.body); +} + +/** Retrieves a synonym map definition. */ +export async function getSynonymMap( + context: Client, + synonymMapName: string, + options: GetSynonymMapOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getSynonymMapSend(context, synonymMapName, options); + return _getSynonymMapDeserialize(result); +} + +export function _deleteSynonymMapSend( + context: Client, + synonymMapName: string, + options: DeleteSynonymMapOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps('{synonymMapName}'){?api%2Dversion}", + { + synonymMapName: synonymMapName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteSynonymMapDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a synonym map. 
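+ *
+ * A sketch under the same `context` assumption; the ETag value is illustrative. "404" is an accepted status here, so deleting an already-removed map resolves rather than throwing.
+ * @example
+ * await deleteSynonymMap(context, "my-synonyms", { ifMatch: '"0x8DD2F3C4E5F6A7B"' });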
*/ +export async function deleteSynonymMap( + context: Client, + synonymMapName: string, + options: DeleteSynonymMapOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteSynonymMapSend(context, synonymMapName, options); + return _deleteSynonymMapDeserialize(result); +} + +export function _createOrUpdateSynonymMapSend( + context: Client, + synonymMap: SynonymMap, + synonymMapName: string, + options: CreateOrUpdateSynonymMapOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps('{synonymMapName}'){?api%2Dversion}", + { + synonymMapName: synonymMapName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: synonymMapSerializer(synonymMap), + }); +} + +export async function _createOrUpdateSynonymMapDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return synonymMapDeserializer(result.body); +} + +/** Creates a new synonym map or updates a synonym map if it already exists. */ +export async function createOrUpdateSynonymMap( + context: Client, + synonymMap: SynonymMap, + synonymMapName: string, + options: CreateOrUpdateSynonymMapOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateSynonymMapSend(context, synonymMap, synonymMapName, options); + return _createOrUpdateSynonymMapDeserialize(result); +} diff --git a/sdk/search/search-documents/generated/searchIndex/api/options.ts b/sdk/search/search-documents/generated/searchIndex/api/options.ts new file mode 100644 index 000000000000..d1ea6adfa624 --- /dev/null +++ b/sdk/search/search-documents/generated/searchIndex/api/options.ts @@ -0,0 +1,234 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { OperationOptions } from "@azure-rest/core-client"; + +/** Optional parameters. */ +export interface ListIndexStatsSummaryOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetServiceStatisticsOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateKnowledgeSourceOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ListKnowledgeSourcesOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. 
*/ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetKnowledgeSourceOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteKnowledgeSourceOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateKnowledgeSourceOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateKnowledgeBaseOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ListKnowledgeBasesOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetKnowledgeBaseOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteKnowledgeBaseOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateKnowledgeBaseOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateAliasOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ListAliasesOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. 
*/ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetAliasOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteAliasOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateAliasOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface AnalyzeTextOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface GetIndexStatisticsOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface CreateIndexOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ListIndexesOptionalParams extends OperationOptions { + /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetIndexOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface DeleteIndexOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. 
*/ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateIndexOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. */ + allowIndexDowntime?: boolean; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface CreateSynonymMapOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetSynonymMapsOptionalParams extends OperationOptions { + /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetSynonymMapOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteSynonymMapOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateSynonymMapOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. 
*/
+  clientRequestId?: string;
+}
diff --git a/sdk/search/search-documents/generated/searchIndex/api/searchIndexContext.ts b/sdk/search/search-documents/generated/searchIndex/api/searchIndexContext.ts
new file mode 100644
index 000000000000..a44aa460b06d
--- /dev/null
+++ b/sdk/search/search-documents/generated/searchIndex/api/searchIndexContext.ts
@@ -0,0 +1,61 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+import { logger } from "../../logger.js";
+import { KnownVersions } from "../../models/models.js";
+import { Client, ClientOptions, getClient } from "@azure-rest/core-client";
+import { KeyCredential, TokenCredential } from "@azure/core-auth";
+
+export interface SearchIndexContext extends Client {
+  /** The API version to use for this operation; known values are described by {@link KnownVersions}. */
+  apiVersion: string;
+}
+
+/** Optional parameters for the client. */
+export interface SearchIndexClientOptionalParams extends ClientOptions {
+  /** The API version to use for this operation; known values are described by {@link KnownVersions}. */
+  apiVersion?: string;
+}
+
+export function createSearchIndex(
+  endpointParam: string,
+  credential: KeyCredential | TokenCredential,
+  options: SearchIndexClientOptionalParams = {},
+): SearchIndexContext {
+  const endpointUrl = options.endpoint ?? String(endpointParam);
+  const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix;
+  const userAgentInfo = `azsdk-js-search-documents/12.3.0-beta.1`;
+  const userAgentPrefix = prefixFromOptions
+    ? `${prefixFromOptions} azsdk-js-api ${userAgentInfo}`
+    : `azsdk-js-api ${userAgentInfo}`;
+  const { apiVersion: _, ...updatedOptions } = {
+    ...options,
+    userAgentOptions: { userAgentPrefix },
+    loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info },
+    credentials: {
+      scopes: options.credentials?.scopes ?? ["https://search.azure.com/.default"],
+      apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key",
+    },
+  };
+  const clientContext = getClient(endpointUrl, credential, updatedOptions);
+  clientContext.pipeline.removePolicy({ name: "ApiVersionPolicy" });
+  const apiVersion = options.apiVersion ?? "2025-11-01-preview";
+  clientContext.pipeline.addPolicy({
+    name: "ClientApiVersionPolicy",
+    sendRequest: (req, next) => {
+      // Use the api-version already present in the request URL, if any;
+      // otherwise append the one configured on the client options.
+      const url = new URL(req.url);
+      if (!url.searchParams.get("api-version")) {
+        req.url = `${req.url}${
+          Array.from(url.searchParams.keys()).length > 0 ? "&" : "?"
+        }api-version=${apiVersion}`;
+      }
+
+      return next(req);
+    },
+  });
+  return { ...clientContext, apiVersion } as SearchIndexContext;
+}
diff --git a/sdk/search/search-documents/generated/searchIndex/index.ts b/sdk/search/search-documents/generated/searchIndex/index.ts
new file mode 100644
index 000000000000..cb9a864c1562
--- /dev/null
+++ b/sdk/search/search-documents/generated/searchIndex/index.ts
@@ -0,0 +1,37 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
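+
+// A hedged construction sketch (endpoint and key are placeholders; the import specifiers assume this
+// generated layout). Requests default to api-version "2025-11-01-preview" via the
+// ClientApiVersionPolicy wired up in ./api/searchIndexContext.ts, overridable through `apiVersion`:
+//
+//   import { SearchIndexClient } from "./searchIndexClient.js";
+//   import { AzureKeyCredential } from "@azure/core-auth";
+//   const client = new SearchIndexClient(
+//     "https://<service>.search.windows.net",
+//     new AzureKeyCredential("<admin-key>"),
+//   );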
+ +export { SearchIndexClient } from "./searchIndexClient.js"; +export { + ListIndexStatsSummaryOptionalParams, + GetServiceStatisticsOptionalParams, + CreateKnowledgeSourceOptionalParams, + ListKnowledgeSourcesOptionalParams, + GetKnowledgeSourceOptionalParams, + DeleteKnowledgeSourceOptionalParams, + CreateOrUpdateKnowledgeSourceOptionalParams, + CreateKnowledgeBaseOptionalParams, + ListKnowledgeBasesOptionalParams, + GetKnowledgeBaseOptionalParams, + DeleteKnowledgeBaseOptionalParams, + CreateOrUpdateKnowledgeBaseOptionalParams, + CreateAliasOptionalParams, + ListAliasesOptionalParams, + GetAliasOptionalParams, + DeleteAliasOptionalParams, + CreateOrUpdateAliasOptionalParams, + AnalyzeTextOptionalParams, + GetIndexStatisticsOptionalParams, + CreateIndexOptionalParams, + ListIndexesOptionalParams, + GetIndexOptionalParams, + DeleteIndexOptionalParams, + CreateOrUpdateIndexOptionalParams, + CreateSynonymMapOptionalParams, + GetSynonymMapsOptionalParams, + GetSynonymMapOptionalParams, + DeleteSynonymMapOptionalParams, + CreateOrUpdateSynonymMapOptionalParams, + SearchIndexContext, + SearchIndexClientOptionalParams, +} from "./api/index.js"; diff --git a/sdk/search/search-documents/generated/searchIndex/searchIndexClient.ts b/sdk/search/search-documents/generated/searchIndex/searchIndexClient.ts new file mode 100644 index 000000000000..cf4ab92af502 --- /dev/null +++ b/sdk/search/search-documents/generated/searchIndex/searchIndexClient.ts @@ -0,0 +1,343 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { + createSearchIndex, + SearchIndexContext, + SearchIndexClientOptionalParams, +} from "./api/index.js"; +import { + SynonymMap, + ListSynonymMapsResult, + SearchIndex, + GetIndexStatisticsResult, + AnalyzeTextOptions, + AnalyzeResult, + SearchAlias, + KnowledgeBase, + KnowledgeSourceUnion, + SearchServiceStatistics, + IndexStatisticsSummary, +} from "../models/azure/search/documents/indexes/models.js"; +import { PagedAsyncIterableIterator } from "../static-helpers/pagingHelpers.js"; +import { + listIndexStatsSummary, + getServiceStatistics, + createKnowledgeSource, + listKnowledgeSources, + getKnowledgeSource, + deleteKnowledgeSource, + createOrUpdateKnowledgeSource, + createKnowledgeBase, + listKnowledgeBases, + getKnowledgeBase, + deleteKnowledgeBase, + createOrUpdateKnowledgeBase, + createAlias, + listAliases, + getAlias, + deleteAlias, + createOrUpdateAlias, + analyzeText, + getIndexStatistics, + createIndex, + listIndexes, + getIndex, + deleteIndex, + createOrUpdateIndex, + createSynonymMap, + getSynonymMaps, + getSynonymMap, + deleteSynonymMap, + createOrUpdateSynonymMap, +} from "./api/operations.js"; +import { + ListIndexStatsSummaryOptionalParams, + GetServiceStatisticsOptionalParams, + CreateKnowledgeSourceOptionalParams, + ListKnowledgeSourcesOptionalParams, + GetKnowledgeSourceOptionalParams, + DeleteKnowledgeSourceOptionalParams, + CreateOrUpdateKnowledgeSourceOptionalParams, + CreateKnowledgeBaseOptionalParams, + ListKnowledgeBasesOptionalParams, + GetKnowledgeBaseOptionalParams, + DeleteKnowledgeBaseOptionalParams, + CreateOrUpdateKnowledgeBaseOptionalParams, + CreateAliasOptionalParams, + ListAliasesOptionalParams, + GetAliasOptionalParams, + DeleteAliasOptionalParams, + CreateOrUpdateAliasOptionalParams, + AnalyzeTextOptionalParams, + GetIndexStatisticsOptionalParams, + CreateIndexOptionalParams, + ListIndexesOptionalParams, + GetIndexOptionalParams, + DeleteIndexOptionalParams, + CreateOrUpdateIndexOptionalParams, + 
CreateSynonymMapOptionalParams,
+  GetSynonymMapsOptionalParams,
+  GetSynonymMapOptionalParams,
+  DeleteSynonymMapOptionalParams,
+  CreateOrUpdateSynonymMapOptionalParams,
+} from "./api/options.js";
+import { KeyCredential, TokenCredential } from "@azure/core-auth";
+import { Pipeline } from "@azure/core-rest-pipeline";
+
+export { SearchIndexClientOptionalParams } from "./api/searchIndexContext.js";
+
+export class SearchIndexClient {
+  private _client: SearchIndexContext;
+  /** The pipeline used by this client to make requests. */
+  public readonly pipeline: Pipeline;
+
+  constructor(
+    endpointParam: string,
+    credential: KeyCredential | TokenCredential,
+    options: SearchIndexClientOptionalParams = {},
+  ) {
+    const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix;
+    const userAgentPrefix = prefixFromOptions
+      ? `${prefixFromOptions} azsdk-js-client`
+      : `azsdk-js-client`;
+    this._client = createSearchIndex(endpointParam, credential, {
+      ...options,
+      userAgentOptions: { userAgentPrefix },
+    });
+    this.pipeline = this._client.pipeline;
+  }
+
+  /** Retrieves a summary of statistics for all indexes in the search service. */
+  listIndexStatsSummary(
+    options: ListIndexStatsSummaryOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<IndexStatisticsSummary> {
+    return listIndexStatsSummary(this._client, options);
+  }
+
+  /** Gets service level statistics for a search service. */
+  getServiceStatistics(
+    options: GetServiceStatisticsOptionalParams = { requestOptions: {} },
+  ): Promise<SearchServiceStatistics> {
+    return getServiceStatistics(this._client, options);
+  }
+
+  /** Creates a new knowledge source. */
+  createKnowledgeSource(
+    knowledgeSource: KnowledgeSourceUnion,
+    options: CreateKnowledgeSourceOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeSourceUnion> {
+    return createKnowledgeSource(this._client, knowledgeSource, options);
+  }
+
+  /** Lists all knowledge sources available for a search service. */
+  listKnowledgeSources(
+    options: ListKnowledgeSourcesOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<KnowledgeSourceUnion> {
+    return listKnowledgeSources(this._client, options);
+  }
+
+  /** Retrieves a knowledge source definition. */
+  getKnowledgeSource(
+    sourceName: string,
+    options: GetKnowledgeSourceOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeSourceUnion> {
+    return getKnowledgeSource(this._client, sourceName, options);
+  }
+
+  /** Deletes an existing knowledge source. */
+  deleteKnowledgeSource(
+    sourceName: string,
+    options: DeleteKnowledgeSourceOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteKnowledgeSource(this._client, sourceName, options);
+  }
+
+  /** Creates a new knowledge source or updates a knowledge source if it already exists. */
+  createOrUpdateKnowledgeSource(
+    knowledgeSource: KnowledgeSourceUnion,
+    sourceName: string,
+    options: CreateOrUpdateKnowledgeSourceOptionalParams = {
+      requestOptions: {},
+    },
+  ): Promise<KnowledgeSourceUnion> {
+    return createOrUpdateKnowledgeSource(this._client, knowledgeSource, sourceName, options);
+  }
+
+  /** Creates a new knowledge base. */
+  createKnowledgeBase(
+    knowledgeBase: KnowledgeBase,
+    options: CreateKnowledgeBaseOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeBase> {
+    return createKnowledgeBase(this._client, knowledgeBase, options);
+  }
+
+  /** Lists all knowledge bases available for a search service. */
+  listKnowledgeBases(
+    options: ListKnowledgeBasesOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<KnowledgeBase> {
+    return listKnowledgeBases(this._client, options);
+  }
+
+  /** Retrieves a knowledge base definition.
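+   *
+   * A hedged sketch (assumes a constructed `SearchIndexClient` named `client`; the base name is a placeholder):
+   * @example
+   * const knowledgeBase = await client.getKnowledgeBase("my-knowledge-base");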
+   */
+  getKnowledgeBase(
+    knowledgeBaseName: string,
+    options: GetKnowledgeBaseOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeBase> {
+    return getKnowledgeBase(this._client, knowledgeBaseName, options);
+  }
+
+  /** Deletes a knowledge base. */
+  deleteKnowledgeBase(
+    knowledgeBaseName: string,
+    options: DeleteKnowledgeBaseOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteKnowledgeBase(this._client, knowledgeBaseName, options);
+  }
+
+  /** Creates a new knowledge base or updates a knowledge base if it already exists. */
+  createOrUpdateKnowledgeBase(
+    knowledgeBase: KnowledgeBase,
+    knowledgeBaseName: string,
+    options: CreateOrUpdateKnowledgeBaseOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeBase> {
+    return createOrUpdateKnowledgeBase(this._client, knowledgeBase, knowledgeBaseName, options);
+  }
+
+  /** Creates a new search alias. */
+  createAlias(
+    alias: SearchAlias,
+    options: CreateAliasOptionalParams = { requestOptions: {} },
+  ): Promise<SearchAlias> {
+    return createAlias(this._client, alias, options);
+  }
+
+  /** Lists all aliases available for a search service. */
+  listAliases(
+    options: ListAliasesOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<SearchAlias> {
+    return listAliases(this._client, options);
+  }
+
+  /** Retrieves an alias definition. */
+  getAlias(
+    aliasName: string,
+    options: GetAliasOptionalParams = { requestOptions: {} },
+  ): Promise<SearchAlias> {
+    return getAlias(this._client, aliasName, options);
+  }
+
+  /** Deletes a search alias and its associated mapping to an index. This operation is permanent, with no recovery option. The mapped index is untouched by this operation. */
+  deleteAlias(
+    aliasName: string,
+    options: DeleteAliasOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteAlias(this._client, aliasName, options);
+  }
+
+  /** Creates a new search alias or updates an alias if it already exists. */
+  createOrUpdateAlias(
+    alias: SearchAlias,
+    aliasName: string,
+    options: CreateOrUpdateAliasOptionalParams = { requestOptions: {} },
+  ): Promise<SearchAlias> {
+    return createOrUpdateAlias(this._client, alias, aliasName, options);
+  }
+
+  /** Shows how an analyzer breaks text into tokens. */
+  analyzeText(
+    request: AnalyzeTextOptions,
+    indexName: string,
+    options: AnalyzeTextOptionalParams = { requestOptions: {} },
+  ): Promise<AnalyzeResult> {
+    return analyzeText(this._client, request, indexName, options);
+  }
+
+  /** Returns statistics for the given index, including a document count and storage usage. */
+  getIndexStatistics(
+    indexName: string,
+    options: GetIndexStatisticsOptionalParams = { requestOptions: {} },
+  ): Promise<GetIndexStatisticsResult> {
+    return getIndexStatistics(this._client, indexName, options);
+  }
+
+  /** Creates a new search index. */
+  createIndex(
+    index: SearchIndex,
+    options: CreateIndexOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndex> {
+    return createIndex(this._client, index, options);
+  }
+
+  /** Lists all indexes available for a search service. */
+  listIndexes(
+    options: ListIndexesOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<SearchIndex> {
+    return listIndexes(this._client, options);
+  }
+
+  /** Retrieves an index definition. */
+  getIndex(
+    indexName: string,
+    options: GetIndexOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndex> {
+    return getIndex(this._client, indexName, options);
+  }
+
+  /** Deletes a search index and all the documents it contains. This operation is permanent, with no recovery option.
+   * Make sure you have a master copy of your index definition, data ingestion code, and a backup of the primary data source in case you need to re-build the index. */
+  deleteIndex(
+    indexName: string,
+    options: DeleteIndexOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteIndex(this._client, indexName, options);
+  }
+
+  /** Creates a new search index or updates an index if it already exists. */
+  createOrUpdateIndex(
+    index: SearchIndex,
+    indexName: string,
+    options: CreateOrUpdateIndexOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndex> {
+    return createOrUpdateIndex(this._client, index, indexName, options);
+  }
+
+  /** Creates a new synonym map. */
+  createSynonymMap(
+    synonymMap: SynonymMap,
+    options: CreateSynonymMapOptionalParams = { requestOptions: {} },
+  ): Promise<SynonymMap> {
+    return createSynonymMap(this._client, synonymMap, options);
+  }
+
+  /** Lists all synonym maps available for a search service. */
+  getSynonymMaps(
+    options: GetSynonymMapsOptionalParams = { requestOptions: {} },
+  ): Promise<ListSynonymMapsResult> {
+    return getSynonymMaps(this._client, options);
+  }
+
+  /** Retrieves a synonym map definition. */
+  getSynonymMap(
+    synonymMapName: string,
+    options: GetSynonymMapOptionalParams = { requestOptions: {} },
+  ): Promise<SynonymMap> {
+    return getSynonymMap(this._client, synonymMapName, options);
+  }
+
+  /** Deletes a synonym map. */
+  deleteSynonymMap(
+    synonymMapName: string,
+    options: DeleteSynonymMapOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteSynonymMap(this._client, synonymMapName, options);
+  }
+
+  /** Creates a new synonym map or updates a synonym map if it already exists. */
+  createOrUpdateSynonymMap(
+    synonymMap: SynonymMap,
+    synonymMapName: string,
+    options: CreateOrUpdateSynonymMapOptionalParams = { requestOptions: {} },
+  ): Promise<SynonymMap> {
+    return createOrUpdateSynonymMap(this._client, synonymMap, synonymMapName, options);
+  }
+}
diff --git a/sdk/search/search-documents/generated/searchIndexer/api/index.ts b/sdk/search/search-documents/generated/searchIndexer/api/index.ts
new file mode 100644
index 000000000000..66ccb1240c61
--- /dev/null
+++ b/sdk/search/search-documents/generated/searchIndexer/api/index.ts
@@ -0,0 +1,54 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+ +export { + resetSkills, + createSkillset, + getSkillsets, + getSkillset, + deleteSkillset, + createOrUpdateSkillset, + getIndexerStatus, + createIndexer, + getIndexers, + getIndexer, + deleteIndexer, + createOrUpdateIndexer, + runIndexer, + resetDocuments, + resync, + resetIndexer, + createDataSourceConnection, + getDataSourceConnections, + getDataSourceConnection, + deleteDataSourceConnection, + createOrUpdateDataSourceConnection, +} from "./operations.js"; +export { + ResetSkillsOptionalParams, + CreateSkillsetOptionalParams, + GetSkillsetsOptionalParams, + GetSkillsetOptionalParams, + DeleteSkillsetOptionalParams, + CreateOrUpdateSkillsetOptionalParams, + GetIndexerStatusOptionalParams, + CreateIndexerOptionalParams, + GetIndexersOptionalParams, + GetIndexerOptionalParams, + DeleteIndexerOptionalParams, + CreateOrUpdateIndexerOptionalParams, + RunIndexerOptionalParams, + ResetDocumentsOptionalParams, + ResyncOptionalParams, + ResetIndexerOptionalParams, + CreateDataSourceConnectionOptionalParams, + GetDataSourceConnectionsOptionalParams, + GetDataSourceConnectionOptionalParams, + DeleteDataSourceConnectionOptionalParams, + CreateOrUpdateDataSourceConnectionOptionalParams, +} from "./options.js"; +export { + createSearchIndexer, + SearchIndexerContext, + SearchIndexerClientOptionalParams, +} from "./searchIndexerContext.js"; diff --git a/sdk/search/search-documents/generated/searchIndexer/api/operations.ts b/sdk/search/search-documents/generated/searchIndexer/api/operations.ts new file mode 100644 index 000000000000..ef684553e558 --- /dev/null +++ b/sdk/search/search-documents/generated/searchIndexer/api/operations.ts @@ -0,0 +1,1132 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { SearchIndexerContext as Client } from "./index.js"; +import { + SearchIndexerDataSourceConnection, + searchIndexerDataSourceConnectionSerializer, + searchIndexerDataSourceConnectionDeserializer, + ListDataSourcesResult, + listDataSourcesResultDeserializer, + documentKeysOrIdsSerializer, + SearchIndexer, + searchIndexerSerializer, + searchIndexerDeserializer, + ListIndexersResult, + listIndexersResultDeserializer, + SearchIndexerStatus, + searchIndexerStatusDeserializer, + SearchIndexerSkillset, + searchIndexerSkillsetSerializer, + searchIndexerSkillsetDeserializer, + ListSkillsetsResult, + listSkillsetsResultDeserializer, + SkillNames, + skillNamesSerializer, +} from "../../models/azure/search/documents/indexes/models.js"; +import { errorResponseDeserializer } from "../../models/azure/search/documents/models.js"; +import { expandUrlTemplate } from "../../static-helpers/urlTemplate.js"; +import { + ResetSkillsOptionalParams, + CreateSkillsetOptionalParams, + GetSkillsetsOptionalParams, + GetSkillsetOptionalParams, + DeleteSkillsetOptionalParams, + CreateOrUpdateSkillsetOptionalParams, + GetIndexerStatusOptionalParams, + CreateIndexerOptionalParams, + GetIndexersOptionalParams, + GetIndexerOptionalParams, + DeleteIndexerOptionalParams, + CreateOrUpdateIndexerOptionalParams, + RunIndexerOptionalParams, + ResetDocumentsOptionalParams, + ResyncOptionalParams, + ResetIndexerOptionalParams, + CreateDataSourceConnectionOptionalParams, + GetDataSourceConnectionsOptionalParams, + GetDataSourceConnectionOptionalParams, + DeleteDataSourceConnectionOptionalParams, + CreateOrUpdateDataSourceConnectionOptionalParams, +} from "./options.js"; +import { + StreamableMethod, + PathUncheckedResponse, + createRestError, + operationOptionsToRequestParameters, +} from 
"@azure-rest/core-client"; + +export function _resetSkillsSend( + context: Client, + skillNames: SkillNames, + skillsetName: string, + options: ResetSkillsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets('{skillsetName}')/search.resetskills{?api%2Dversion}", + { + skillsetName: skillsetName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + body: skillNamesSerializer(skillNames), + }); +} + +export async function _resetSkillsDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Reset an existing skillset in a search service. */ +export async function resetSkills( + context: Client, + skillNames: SkillNames, + skillsetName: string, + options: ResetSkillsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _resetSkillsSend(context, skillNames, skillsetName, options); + return _resetSkillsDeserialize(result); +} + +export function _createSkillsetSend( + context: Client, + skillset: SearchIndexerSkillset, + options: CreateSkillsetOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerSkillsetSerializer(skillset), + }); +} + +export async function _createSkillsetDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerSkillsetDeserializer(result.body); +} + +/** Creates a new skillset in a search service. */ +export async function createSkillset( + context: Client, + skillset: SearchIndexerSkillset, + options: CreateSkillsetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createSkillsetSend(context, skillset, options); + return _createSkillsetDeserialize(result); +} + +export function _getSkillsetsSend( + context: Client, + options: GetSkillsetsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getSkillsetsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return listSkillsetsResultDeserializer(result.body); +} + +/** List all skillsets in a search service. */ +export async function getSkillsets( + context: Client, + options: GetSkillsetsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getSkillsetsSend(context, options); + return _getSkillsetsDeserialize(result); +} + +export function _getSkillsetSend( + context: Client, + skillsetName: string, + options: GetSkillsetOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets('{skillsetName}'){?api%2Dversion}", + { + skillsetName: skillsetName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getSkillsetDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerSkillsetDeserializer(result.body); +} + +/** Retrieves a skillset in a search service. */ +export async function getSkillset( + context: Client, + skillsetName: string, + options: GetSkillsetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getSkillsetSend(context, skillsetName, options); + return _getSkillsetDeserialize(result); +} + +export function _deleteSkillsetSend( + context: Client, + skillsetName: string, + options: DeleteSkillsetOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets('{skillsetName}'){?api%2Dversion}", + { + skillsetName: skillsetName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteSkillsetDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a skillset in a search service. 
*/ +export async function deleteSkillset( + context: Client, + skillsetName: string, + options: DeleteSkillsetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteSkillsetSend(context, skillsetName, options); + return _deleteSkillsetDeserialize(result); +} + +export function _createOrUpdateSkillsetSend( + context: Client, + skillset: SearchIndexerSkillset, + skillsetName: string, + options: CreateOrUpdateSkillsetOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets('{skillsetName}'){?api%2Dversion,ignoreResetRequirements,disableCacheReprocessingChangeDetection}", + { + skillsetName: skillsetName, + "api%2Dversion": context.apiVersion, + ignoreResetRequirements: options?.skipIndexerResetRequirementForCache, + disableCacheReprocessingChangeDetection: options?.disableCacheReprocessingChangeDetection, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerSkillsetSerializer(skillset), + }); +} + +export async function _createOrUpdateSkillsetDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerSkillsetDeserializer(result.body); +} + +/** Creates a new skillset in a search service or updates the skillset if it already exists. */ +export async function createOrUpdateSkillset( + context: Client, + skillset: SearchIndexerSkillset, + skillsetName: string, + options: CreateOrUpdateSkillsetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateSkillsetSend(context, skillset, skillsetName, options); + return _createOrUpdateSkillsetDeserialize(result); +} + +export function _getIndexerStatusSend( + context: Client, + indexerName: string, + options: GetIndexerStatusOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}')/search.status{?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexerStatusDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerStatusDeserializer(result.body); +} + +/** Returns the current status and execution history of an indexer. */ +export async function getIndexerStatus( + context: Client, + indexerName: string, + options: GetIndexerStatusOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexerStatusSend(context, indexerName, options); + return _getIndexerStatusDeserialize(result); +} + +export function _createIndexerSend( + context: Client, + indexer: SearchIndexer, + options: CreateIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerSerializer(indexer), + }); +} + +export async function _createIndexerDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDeserializer(result.body); +} + +/** Creates a new indexer. */ +export async function createIndexer( + context: Client, + indexer: SearchIndexer, + options: CreateIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createIndexerSend(context, indexer, options); + return _createIndexerDeserialize(result); +} + +export function _getIndexersSend( + context: Client, + options: GetIndexersOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexersDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return listIndexersResultDeserializer(result.body); +} + +/** Lists all indexers available for a search service. 
*/ +export async function getIndexers( + context: Client, + options: GetIndexersOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexersSend(context, options); + return _getIndexersDeserialize(result); +} + +export function _getIndexerSend( + context: Client, + indexerName: string, + options: GetIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}'){?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexerDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDeserializer(result.body); +} + +/** Retrieves an indexer definition. */ +export async function getIndexer( + context: Client, + indexerName: string, + options: GetIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexerSend(context, indexerName, options); + return _getIndexerDeserialize(result); +} + +export function _deleteIndexerSend( + context: Client, + indexerName: string, + options: DeleteIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}'){?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteIndexerDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes an indexer. 
*/ +export async function deleteIndexer( + context: Client, + indexerName: string, + options: DeleteIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteIndexerSend(context, indexerName, options); + return _deleteIndexerDeserialize(result); +} + +export function _createOrUpdateIndexerSend( + context: Client, + indexer: SearchIndexer, + indexerName: string, + options: CreateOrUpdateIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}'){?api%2Dversion,ignoreResetRequirements,disableCacheReprocessingChangeDetection}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + ignoreResetRequirements: options?.skipIndexerResetRequirementForCache, + disableCacheReprocessingChangeDetection: options?.disableCacheReprocessingChangeDetection, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerSerializer(indexer), + }); +} + +export async function _createOrUpdateIndexerDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDeserializer(result.body); +} + +/** Creates a new indexer or updates an indexer if it already exists. */ +export async function createOrUpdateIndexer( + context: Client, + indexer: SearchIndexer, + indexerName: string, + options: CreateOrUpdateIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateIndexerSend(context, indexer, indexerName, options); + return _createOrUpdateIndexerDeserialize(result); +} + +export function _runIndexerSend( + context: Client, + indexerName: string, + options: RunIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}')/search.run{?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _runIndexerDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["202"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Runs an indexer on-demand. 
*/
+export async function runIndexer(
+  context: Client,
+  indexerName: string,
+  options: RunIndexerOptionalParams = { requestOptions: {} },
+): Promise<void> {
+  const result = await _runIndexerSend(context, indexerName, options);
+  return _runIndexerDeserialize(result);
+}
+
+export function _resetDocumentsSend(
+  context: Client,
+  indexerName: string,
+  options: ResetDocumentsOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexers('{indexerName}')/search.resetdocs{?api%2Dversion,overwrite}",
+    {
+      indexerName: indexerName,
+      "api%2Dversion": context.apiVersion,
+      overwrite: options?.overwrite,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).post({
+    ...operationOptionsToRequestParameters(options),
+    contentType: "application/json",
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...options.requestOptions?.headers,
+    },
+    body: !options["keysOrIds"]
+      ? options["keysOrIds"]
+      : documentKeysOrIdsSerializer(options["keysOrIds"]),
+  });
+}
+
+export async function _resetDocumentsDeserialize(result: PathUncheckedResponse): Promise<void> {
+  const expectedStatuses = ["204"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return;
+}
+
+/** Resets specific documents in the datasource to be selectively re-ingested by the indexer. */
+export async function resetDocuments(
+  context: Client,
+  indexerName: string,
+  options: ResetDocumentsOptionalParams = { requestOptions: {} },
+): Promise<void> {
+  const result = await _resetDocumentsSend(context, indexerName, options);
+  return _resetDocumentsDeserialize(result);
+}
+
+export function _resyncSend(
+  context: Client,
+  indexerName: string,
+  options: ResyncOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexers('{indexerName}')/search.resync{?api%2Dversion}",
+    {
+      indexerName: indexerName,
+      "api%2Dversion": context.apiVersion,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).post({
+    ...operationOptionsToRequestParameters(options),
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...options.requestOptions?.headers,
+    },
+  });
+}
+
+export async function _resyncDeserialize(result: PathUncheckedResponse): Promise<void> {
+  const expectedStatuses = ["204"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return;
+}
+
+/** Resync selective options from the datasource to be re-ingested by the indexer.
*/ +export async function resync( + context: Client, + indexerName: string, + options: ResyncOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _resyncSend(context, indexerName, options); + return _resyncDeserialize(result); +} + +export function _resetIndexerSend( + context: Client, + indexerName: string, + options: ResetIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}')/search.reset{?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _resetIndexerDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Resets the change tracking state associated with an indexer. */ +export async function resetIndexer( + context: Client, + indexerName: string, + options: ResetIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _resetIndexerSend(context, indexerName, options); + return _resetIndexerDeserialize(result); +} + +export function _createDataSourceConnectionSend( + context: Client, + dataSource: SearchIndexerDataSourceConnection, + options: CreateDataSourceConnectionOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerDataSourceConnectionSerializer(dataSource), + }); +} + +export async function _createDataSourceConnectionDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDataSourceConnectionDeserializer(result.body); +} + +/** Creates a new datasource. 
*/ +export async function createDataSourceConnection( + context: Client, + dataSource: SearchIndexerDataSourceConnection, + options: CreateDataSourceConnectionOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createDataSourceConnectionSend(context, dataSource, options); + return _createDataSourceConnectionDeserialize(result); +} + +export function _getDataSourceConnectionsSend( + context: Client, + options: GetDataSourceConnectionsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getDataSourceConnectionsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return listDataSourcesResultDeserializer(result.body); +} + +/** Lists all datasources available for a search service. */ +export async function getDataSourceConnections( + context: Client, + options: GetDataSourceConnectionsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getDataSourceConnectionsSend(context, options); + return _getDataSourceConnectionsDeserialize(result); +} + +export function _getDataSourceConnectionSend( + context: Client, + dataSourceName: string, + options: GetDataSourceConnectionOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources('{dataSourceName}'){?api%2Dversion}", + { + dataSourceName: dataSourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getDataSourceConnectionDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDataSourceConnectionDeserializer(result.body); +} + +/** Retrieves a datasource definition. 
*/ +export async function getDataSourceConnection( + context: Client, + dataSourceName: string, + options: GetDataSourceConnectionOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getDataSourceConnectionSend(context, dataSourceName, options); + return _getDataSourceConnectionDeserialize(result); +} + +export function _deleteDataSourceConnectionSend( + context: Client, + dataSourceName: string, + options: DeleteDataSourceConnectionOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources('{dataSourceName}'){?api%2Dversion}", + { + dataSourceName: dataSourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteDataSourceConnectionDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a datasource. */ +export async function deleteDataSourceConnection( + context: Client, + dataSourceName: string, + options: DeleteDataSourceConnectionOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteDataSourceConnectionSend(context, dataSourceName, options); + return _deleteDataSourceConnectionDeserialize(result); +} + +export function _createOrUpdateDataSourceConnectionSend( + context: Client, + dataSource: SearchIndexerDataSourceConnection, + dataSourceName: string, + options: CreateOrUpdateDataSourceConnectionOptionalParams = { + requestOptions: {}, + }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources('{dataSourceName}'){?api%2Dversion,ignoreResetRequirements}", + { + dataSourceName: dataSourceName, + "api%2Dversion": context.apiVersion, + ignoreResetRequirements: options?.skipIndexerResetRequirementForCache, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerDataSourceConnectionSerializer(dataSource), + }); +} + +export async function _createOrUpdateDataSourceConnectionDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDataSourceConnectionDeserializer(result.body); +} + +/** Creates a new datasource or updates a datasource if it already exists. */ +export async function createOrUpdateDataSourceConnection( + context: Client, + dataSource: SearchIndexerDataSourceConnection, + dataSourceName: string, + options: CreateOrUpdateDataSourceConnectionOptionalParams = { + requestOptions: {}, + }, +): Promise { + const result = await _createOrUpdateDataSourceConnectionSend( + context, + dataSource, + dataSourceName, + options, + ); + return _createOrUpdateDataSourceConnectionDeserialize(result); +} diff --git a/sdk/search/search-documents/generated/searchIndexer/api/options.ts b/sdk/search/search-documents/generated/searchIndexer/api/options.ts new file mode 100644 index 000000000000..cbfff4b16893 --- /dev/null +++ b/sdk/search/search-documents/generated/searchIndexer/api/options.ts @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { DocumentKeysOrIds } from "../../models/azure/search/documents/indexes/models.js"; +import { OperationOptions } from "@azure-rest/core-client"; + +/** Optional parameters. */ +export interface ResetSkillsOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateSkillsetOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetSkillsetsOptionalParams extends OperationOptions { + /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetSkillsetOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteSkillsetOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateSkillsetOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. 
*/ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** Ignores cache reset requirements. */ + skipIndexerResetRequirementForCache?: boolean; + /** Disables cache reprocessing change detection. */ + disableCacheReprocessingChangeDetection?: boolean; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetIndexerStatusOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateIndexerOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetIndexersOptionalParams extends OperationOptions { + /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetIndexerOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteIndexerOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateIndexerOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** Ignores cache reset requirements. */ + skipIndexerResetRequirementForCache?: boolean; + /** Disables cache reprocessing change detection. */ + disableCacheReprocessingChangeDetection?: boolean; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface RunIndexerOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ResetDocumentsOptionalParams extends OperationOptions { + /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */ + overwrite?: boolean; + /** The keys or ids of the documents to be re-ingested. If keys are provided, the document key field must be specified in the indexer configuration. 
If ids are provided, the document key field is ignored. */ + keysOrIds?: DocumentKeysOrIds; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ResyncOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ResetIndexerOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateDataSourceConnectionOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetDataSourceConnectionsOptionalParams extends OperationOptions { + /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetDataSourceConnectionOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteDataSourceConnectionOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateDataSourceConnectionOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** Ignores cache reset requirements. */ + skipIndexerResetRequirementForCache?: boolean; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} diff --git a/sdk/search/search-documents/generated/searchIndexer/api/searchIndexerContext.ts b/sdk/search/search-documents/generated/searchIndexer/api/searchIndexerContext.ts new file mode 100644 index 000000000000..83dd21a71ee2 --- /dev/null +++ b/sdk/search/search-documents/generated/searchIndexer/api/searchIndexerContext.ts @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { logger } from "../../logger.js"; +import { KnownVersions } from "../../models/models.js"; +import { Client, ClientOptions, getClient } from "@azure-rest/core-client"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; + +export interface SearchIndexerContext extends Client { + /** The API version to use for this operation. 
Known values of {@link KnownVersions} that the service accepts. */
+  apiVersion: string;
+}
+
+/** Optional parameters for the client. */
+export interface SearchIndexerClientOptionalParams extends ClientOptions {
+  /** The API version to use for this operation. Known values of {@link KnownVersions} that the service accepts. */
+  apiVersion?: string;
+}
+
+export function createSearchIndexer(
+  endpointParam: string,
+  credential: KeyCredential | TokenCredential,
+  options: SearchIndexerClientOptionalParams = {},
+): SearchIndexerContext {
+  const endpointUrl = options.endpoint ?? String(endpointParam);
+  const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix;
+  const userAgentInfo = `azsdk-js-search-documents/12.3.0-beta.1`;
+  const userAgentPrefix = prefixFromOptions
+    ? `${prefixFromOptions} azsdk-js-api ${userAgentInfo}`
+    : `azsdk-js-api ${userAgentInfo}`;
+  const { apiVersion: _, ...updatedOptions } = {
+    ...options,
+    userAgentOptions: { userAgentPrefix },
+    loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info },
+    credentials: {
+      scopes: options.credentials?.scopes ?? ["https://search.azure.com/.default"],
+      apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key",
+    },
+  };
+  const clientContext = getClient(endpointUrl, credential, updatedOptions);
+  clientContext.pipeline.removePolicy({ name: "ApiVersionPolicy" });
+  const apiVersion = options.apiVersion ?? "2025-11-01-preview";
+  clientContext.pipeline.addPolicy({
+    name: "ClientApiVersionPolicy",
+    sendRequest: (req, next) => {
+      // Use the api-version already present in the request URL, if any;
+      // otherwise append the one configured on the client options.
+      const url = new URL(req.url);
+      if (!url.searchParams.get("api-version")) {
+        req.url = `${req.url}${
+          Array.from(url.searchParams.keys()).length > 0 ? "&" : "?"
+        }api-version=${apiVersion}`;
+      }
+
+      return next(req);
+    },
+  });
+  return { ...clientContext, apiVersion } as SearchIndexerContext;
+}
diff --git a/sdk/search/search-documents/generated/searchIndexer/index.ts b/sdk/search/search-documents/generated/searchIndexer/index.ts
new file mode 100644
index 000000000000..79ebbbea79d6
--- /dev/null
+++ b/sdk/search/search-documents/generated/searchIndexer/index.ts
@@ -0,0 +1,29 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
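Reviewer note, not part of the generated diff: a short sketch of how the client context above is consumed through the public SearchIndexerClient. The endpoint and key are placeholders, and the `indexers` result property and `status` field are assumptions inferred from the models imported below, not values confirmed by this change.

import { AzureKeyCredential } from "@azure/core-auth";
import { SearchIndexerClient } from "@azure/search-documents";

async function main(): Promise<void> {
  const indexerClient = new SearchIndexerClient(
    "https://<service-name>.search.windows.net", // assumed placeholder
    new AzureKeyCredential("<admin-api-key>"), // assumed placeholder
    { apiVersion: "2025-11-01-preview" }, // optional; matches the default above
  );
  // Enumerate indexers, check each status, and kick one off on demand (HTTP 202).
  const { indexers } = await indexerClient.getIndexers();
  for (const indexer of indexers) {
    const status = await indexerClient.getIndexerStatus(indexer.name);
    console.log(`${indexer.name}: ${status.status}`);
    await indexerClient.runIndexer(indexer.name);
  }
}

main().catch(console.error);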
+ +export { SearchIndexerClient } from "./searchIndexerClient.js"; +export { + ResetSkillsOptionalParams, + CreateSkillsetOptionalParams, + GetSkillsetsOptionalParams, + GetSkillsetOptionalParams, + DeleteSkillsetOptionalParams, + CreateOrUpdateSkillsetOptionalParams, + GetIndexerStatusOptionalParams, + CreateIndexerOptionalParams, + GetIndexersOptionalParams, + GetIndexerOptionalParams, + DeleteIndexerOptionalParams, + CreateOrUpdateIndexerOptionalParams, + RunIndexerOptionalParams, + ResetDocumentsOptionalParams, + ResyncOptionalParams, + ResetIndexerOptionalParams, + CreateDataSourceConnectionOptionalParams, + GetDataSourceConnectionsOptionalParams, + GetDataSourceConnectionOptionalParams, + DeleteDataSourceConnectionOptionalParams, + CreateOrUpdateDataSourceConnectionOptionalParams, + SearchIndexerContext, + SearchIndexerClientOptionalParams, +} from "./api/index.js"; diff --git a/sdk/search/search-documents/generated/searchIndexer/searchIndexerClient.ts b/sdk/search/search-documents/generated/searchIndexer/searchIndexerClient.ts new file mode 100644 index 000000000000..3b7ebc8bf189 --- /dev/null +++ b/sdk/search/search-documents/generated/searchIndexer/searchIndexerClient.ts @@ -0,0 +1,261 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { + createSearchIndexer, + SearchIndexerContext, + SearchIndexerClientOptionalParams, +} from "./api/index.js"; +import { + SearchIndexerDataSourceConnection, + ListDataSourcesResult, + SearchIndexer, + ListIndexersResult, + SearchIndexerStatus, + SearchIndexerSkillset, + ListSkillsetsResult, + SkillNames, +} from "../models/azure/search/documents/indexes/models.js"; +import { + resetSkills, + createSkillset, + getSkillsets, + getSkillset, + deleteSkillset, + createOrUpdateSkillset, + getIndexerStatus, + createIndexer, + getIndexers, + getIndexer, + deleteIndexer, + createOrUpdateIndexer, + runIndexer, + resetDocuments, + resync, + resetIndexer, + createDataSourceConnection, + getDataSourceConnections, + getDataSourceConnection, + deleteDataSourceConnection, + createOrUpdateDataSourceConnection, +} from "./api/operations.js"; +import { + ResetSkillsOptionalParams, + CreateSkillsetOptionalParams, + GetSkillsetsOptionalParams, + GetSkillsetOptionalParams, + DeleteSkillsetOptionalParams, + CreateOrUpdateSkillsetOptionalParams, + GetIndexerStatusOptionalParams, + CreateIndexerOptionalParams, + GetIndexersOptionalParams, + GetIndexerOptionalParams, + DeleteIndexerOptionalParams, + CreateOrUpdateIndexerOptionalParams, + RunIndexerOptionalParams, + ResetDocumentsOptionalParams, + ResyncOptionalParams, + ResetIndexerOptionalParams, + CreateDataSourceConnectionOptionalParams, + GetDataSourceConnectionsOptionalParams, + GetDataSourceConnectionOptionalParams, + DeleteDataSourceConnectionOptionalParams, + CreateOrUpdateDataSourceConnectionOptionalParams, +} from "./api/options.js"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; +import { Pipeline } from "@azure/core-rest-pipeline"; + +export { SearchIndexerClientOptionalParams } from "./api/searchIndexerContext.js"; + +export class SearchIndexerClient { + private _client: SearchIndexerContext; + /** The pipeline used by this client to make requests */ + public readonly pipeline: Pipeline; + + constructor( + endpointParam: string, + credential: KeyCredential | TokenCredential, + options: SearchIndexerClientOptionalParams = {}, + ) { + const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix; + const userAgentPrefix = prefixFromOptions + ? 
`${prefixFromOptions} azsdk-js-client`
+      : `azsdk-js-client`;
+    this._client = createSearchIndexer(endpointParam, credential, {
+      ...options,
+      userAgentOptions: { userAgentPrefix },
+    });
+    this.pipeline = this._client.pipeline;
+  }
+
+  /** Reset an existing skillset in a search service. */
+  resetSkills(
+    skillNames: SkillNames,
+    skillsetName: string,
+    options: ResetSkillsOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return resetSkills(this._client, skillNames, skillsetName, options);
+  }
+
+  /** Creates a new skillset in a search service. */
+  createSkillset(
+    skillset: SearchIndexerSkillset,
+    options: CreateSkillsetOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexerSkillset> {
+    return createSkillset(this._client, skillset, options);
+  }
+
+  /** List all skillsets in a search service. */
+  getSkillsets(
+    options: GetSkillsetsOptionalParams = { requestOptions: {} },
+  ): Promise<ListSkillsetsResult> {
+    return getSkillsets(this._client, options);
+  }
+
+  /** Retrieves a skillset in a search service. */
+  getSkillset(
+    skillsetName: string,
+    options: GetSkillsetOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexerSkillset> {
+    return getSkillset(this._client, skillsetName, options);
+  }
+
+  /** Deletes a skillset in a search service. */
+  deleteSkillset(
+    skillsetName: string,
+    options: DeleteSkillsetOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteSkillset(this._client, skillsetName, options);
+  }
+
+  /** Creates a new skillset in a search service or updates the skillset if it already exists. */
+  createOrUpdateSkillset(
+    skillset: SearchIndexerSkillset,
+    skillsetName: string,
+    options: CreateOrUpdateSkillsetOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexerSkillset> {
+    return createOrUpdateSkillset(this._client, skillset, skillsetName, options);
+  }
+
+  /** Returns the current status and execution history of an indexer. */
+  getIndexerStatus(
+    indexerName: string,
+    options: GetIndexerStatusOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexerStatus> {
+    return getIndexerStatus(this._client, indexerName, options);
+  }
+
+  /** Creates a new indexer. */
+  createIndexer(
+    indexer: SearchIndexer,
+    options: CreateIndexerOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexer> {
+    return createIndexer(this._client, indexer, options);
+  }
+
+  /** Lists all indexers available for a search service. */
+  getIndexers(
+    options: GetIndexersOptionalParams = { requestOptions: {} },
+  ): Promise<ListIndexersResult> {
+    return getIndexers(this._client, options);
+  }
+
+  /** Retrieves an indexer definition. */
+  getIndexer(
+    indexerName: string,
+    options: GetIndexerOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexer> {
+    return getIndexer(this._client, indexerName, options);
+  }
+
+  /** Deletes an indexer. */
+  deleteIndexer(
+    indexerName: string,
+    options: DeleteIndexerOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteIndexer(this._client, indexerName, options);
+  }
+
+  /** Creates a new indexer or updates an indexer if it already exists. */
+  createOrUpdateIndexer(
+    indexer: SearchIndexer,
+    indexerName: string,
+    options: CreateOrUpdateIndexerOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexer> {
+    return createOrUpdateIndexer(this._client, indexer, indexerName, options);
+  }
+
+  /** Runs an indexer on-demand. */
+  runIndexer(
+    indexerName: string,
+    options: RunIndexerOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return runIndexer(this._client, indexerName, options);
+  }
+
+  /** Resets specific documents in the datasource to be selectively re-ingested by the indexer. */
+  resetDocuments(
+    indexerName: string,
+    options: ResetDocumentsOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return resetDocuments(this._client, indexerName, options);
+  }
+
+  /** Resync selective options from the datasource to be re-ingested by the indexer. */
+  resync(
+    indexerName: string,
+    options: ResyncOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return resync(this._client, indexerName, options);
+  }
+
+  /** Resets the change tracking state associated with an indexer. */
+  resetIndexer(
+    indexerName: string,
+    options: ResetIndexerOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return resetIndexer(this._client, indexerName, options);
+  }
+
+  /** Creates a new datasource. */
+  createDataSourceConnection(
+    dataSource: SearchIndexerDataSourceConnection,
+    options: CreateDataSourceConnectionOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexerDataSourceConnection> {
+    return createDataSourceConnection(this._client, dataSource, options);
+  }
+
+  /** Lists all datasources available for a search service. */
+  getDataSourceConnections(
+    options: GetDataSourceConnectionsOptionalParams = { requestOptions: {} },
+  ): Promise<ListDataSourcesResult> {
+    return getDataSourceConnections(this._client, options);
+  }
+
+  /** Retrieves a datasource definition. */
+  getDataSourceConnection(
+    dataSourceName: string,
+    options: GetDataSourceConnectionOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexerDataSourceConnection> {
+    return getDataSourceConnection(this._client, dataSourceName, options);
+  }
+
+  /** Deletes a datasource. */
+  deleteDataSourceConnection(
+    dataSourceName: string,
+    options: DeleteDataSourceConnectionOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteDataSourceConnection(this._client, dataSourceName, options);
+  }
+
+  /** Creates a new datasource or updates a datasource if it already exists. */
+  createOrUpdateDataSourceConnection(
+    dataSource: SearchIndexerDataSourceConnection,
+    dataSourceName: string,
+    options: CreateOrUpdateDataSourceConnectionOptionalParams = {
+      requestOptions: {},
+    },
+  ): Promise<SearchIndexerDataSourceConnection> {
+    return createOrUpdateDataSourceConnection(this._client, dataSource, dataSourceName, options);
+  }
+}
diff --git a/sdk/search/search-documents/generated/static-helpers/pagingHelpers.ts b/sdk/search/search-documents/generated/static-helpers/pagingHelpers.ts
new file mode 100644
index 000000000000..5a3472a3f0fe
--- /dev/null
+++ b/sdk/search/search-documents/generated/static-helpers/pagingHelpers.ts
@@ -0,0 +1,245 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+import { Client, createRestError, PathUncheckedResponse } from "@azure-rest/core-client";
+import { RestError } from "@azure/core-rest-pipeline";
+
+/**
+ * Options for the byPage method
+ */
+export interface PageSettings {
+  /**
+   * A reference to a specific page to start iterating from.
+   */
+  continuationToken?: string;
+}
+
+/**
+ * An interface that describes a page of results.
+ */
+export type ContinuablePage<TElement, TPage = TElement[]> = TPage & {
+  /**
+   * The token that keeps track of where to continue the iterator
+   */
+  continuationToken?: string;
+};
+
+/**
+ * An interface that allows async iterable iteration both to completion and by page.
+ */
+export interface PagedAsyncIterableIterator<
+  TElement,
+  TPage = TElement[],
+  TPageSettings extends PageSettings = PageSettings,
+> {
+  /**
+   * The next method, part of the iteration protocol
+   */
+  next(): Promise<IteratorResult<TElement>>;
+  /**
+   * The connection to the async iterator, part of the iteration protocol
+   */
+  [Symbol.asyncIterator](): PagedAsyncIterableIterator<TElement, TPage, TPageSettings>;
+  /**
+   * Return an AsyncIterableIterator that works a page at a time
+   */
+  byPage: (settings?: TPageSettings) => AsyncIterableIterator<ContinuablePage<TElement, TPage>>;
+}
+
+/**
+ * An interface that describes how to communicate with the service.
+ */
+export interface PagedResult<
+  TElement,
+  TPage = TElement[],
+  TPageSettings extends PageSettings = PageSettings,
+> {
+  /**
+   * Link to the first page of results.
+   */
+  firstPageLink?: string;
+  /**
+   * A method that returns a page of results.
+   */
+  getPage: (pageLink?: string) => Promise<{ page: TPage; nextPageLink?: string } | undefined>;
+  /**
+   * a function to implement the `byPage` method on the paged async iterator.
+   */
+  byPage?: (settings?: TPageSettings) => AsyncIterableIterator<ContinuablePage<TElement, TPage>>;
+
+  /**
+   * A function to extract elements from a page.
+   */
+  toElements?: (page: TPage) => TElement[];
+}
+
+/**
+ * Options for the paging helper
+ */
+export interface BuildPagedAsyncIteratorOptions {
+  itemName?: string;
+  nextLinkName?: string;
+  nextLinkMethod?: "GET" | "POST";
+}
+
+/**
+ * Helper to paginate results in a generic way and return a PagedAsyncIterableIterator
+ */
+export function buildPagedAsyncIterator<
+  TElement,
+  TPage = TElement[],
+  TPageSettings extends PageSettings = PageSettings,
+  TResponse extends PathUncheckedResponse = PathUncheckedResponse,
+>(
+  client: Client,
+  getInitialResponse: () => PromiseLike<TResponse>,
+  processResponseBody: (result: TResponse) => PromiseLike<unknown>,
+  expectedStatuses: string[],
+  options: BuildPagedAsyncIteratorOptions = {},
+): PagedAsyncIterableIterator<TElement, TPage, TPageSettings> {
+  const itemName = options.itemName ?? "value";
+  const nextLinkName = options.nextLinkName ?? "nextLink";
+  const nextLinkMethod = options.nextLinkMethod ?? "GET";
+  const pagedResult: PagedResult<TElement, TPage, TPageSettings> = {
+    getPage: async (pageLink?: string) => {
+      const result =
+        pageLink === undefined
+          ? await getInitialResponse()
+          : nextLinkMethod === "POST"
+            ? await client.pathUnchecked(pageLink).post()
+            : await client.pathUnchecked(pageLink).get();
+      checkPagingRequest(result, expectedStatuses);
+      const results = await processResponseBody(result as TResponse);
+      const nextLink = getNextLink(results, nextLinkName);
+      const values = getElements<TElement>(results, itemName) as TPage;
+      return {
+        page: values,
+        nextPageLink: nextLink,
+      };
+    },
+    byPage: (settings?: TPageSettings) => {
+      const { continuationToken } = settings ?? {};
+      return getPageAsyncIterator(pagedResult, {
+        pageLink: continuationToken,
+      });
+    },
+  };
+  return getPagedAsyncIterator(pagedResult);
+}
+
+/**
+ * returns an async iterator that iterates over results. It also has a `byPage`
+ * method that returns pages of items at once.
+ *
+ * @param pagedResult - an object that specifies how to get pages.
+ * @returns a paged async iterator that iterates over results.
+ */
+
+function getPagedAsyncIterator<
+  TElement,
+  TPage = TElement[],
+  TPageSettings extends PageSettings = PageSettings,
+>(
+  pagedResult: PagedResult<TElement, TPage, TPageSettings>,
+): PagedAsyncIterableIterator<TElement, TPage, TPageSettings> {
+  const iter = getItemAsyncIterator<TElement, TPage, TPageSettings>(pagedResult);
+  return {
+    next() {
+      return iter.next();
+    },
+    [Symbol.asyncIterator]() {
+      return this;
+    },
+    byPage:
+      pagedResult?.byPage ??
+
+/**
+ * returns an async iterator that iterates over results. It also has a `byPage`
+ * method that returns pages of items at once.
+ *
+ * @param pagedResult - an object that specifies how to get pages.
+ * @returns a paged async iterator that iterates over results.
+ */
+function getPagedAsyncIterator<
+  TElement,
+  TPage = TElement[],
+  TPageSettings extends PageSettings = PageSettings,
+>(
+  pagedResult: PagedResult<TElement, TPage, TPageSettings>,
+): PagedAsyncIterableIterator<TElement, TPage, TPageSettings> {
+  const iter = getItemAsyncIterator(pagedResult);
+  return {
+    next() {
+      return iter.next();
+    },
+    [Symbol.asyncIterator]() {
+      return this;
+    },
+    byPage:
+      pagedResult?.byPage ??
+      ((settings?: TPageSettings) => {
+        const { continuationToken } = settings ?? {};
+        return getPageAsyncIterator(pagedResult, {
+          pageLink: continuationToken,
+        });
+      }),
+  };
+}
+
+async function* getItemAsyncIterator<TElement, TPage, TPageSettings extends PageSettings>(
+  pagedResult: PagedResult<TElement, TPage, TPageSettings>,
+): AsyncIterableIterator<TElement> {
+  const pages = getPageAsyncIterator(pagedResult);
+  for await (const page of pages) {
+    yield* page as unknown as TElement[];
+  }
+}
+
+async function* getPageAsyncIterator<TElement, TPage, TPageSettings extends PageSettings>(
+  pagedResult: PagedResult<TElement, TPage, TPageSettings>,
+  options: {
+    pageLink?: string;
+  } = {},
+): AsyncIterableIterator<ContinuablePage<TElement, TPage>> {
+  const { pageLink } = options;
+  let response = await pagedResult.getPage(pageLink ?? pagedResult.firstPageLink);
+  if (!response) {
+    return;
+  }
+  let result = response.page as ContinuablePage<TElement, TPage>;
+  result.continuationToken = response.nextPageLink;
+  yield result;
+  while (response.nextPageLink) {
+    response = await pagedResult.getPage(response.nextPageLink);
+    if (!response) {
+      return;
+    }
+    result = response.page as ContinuablePage<TElement, TPage>;
+    result.continuationToken = response.nextPageLink;
+    yield result;
+  }
+}
+
+/**
+ * Gets the value of nextLink in the body.
+ */
+function getNextLink(body: unknown, nextLinkName?: string): string | undefined {
+  if (!nextLinkName) {
+    return undefined;
+  }
+
+  const nextLink = (body as Record<string, unknown>)[nextLinkName];
+
+  if (typeof nextLink !== "string" && typeof nextLink !== "undefined" && nextLink !== null) {
+    throw new RestError(
+      `Body Property ${nextLinkName} should be a string or undefined or null but got ${typeof nextLink}`,
+    );
+  }
+
+  if (nextLink === null) {
+    return undefined;
+  }
+
+  return nextLink;
+}
+
+/**
+ * Gets the elements of the current request in the body.
+ */
+function getElements<T>(body: unknown, itemName: string): T[] {
+  const value = (body as Record<string, unknown>)[itemName] as T[];
+  if (!Array.isArray(value)) {
+    throw new RestError(
+      `Couldn't paginate response\n Body doesn't contain an array property with name: ${itemName}`,
+    );
+  }
+
+  return value ?? [];
+}
+
+/**
+ * Checks if a request failed
+ */
+function checkPagingRequest(response: PathUncheckedResponse, expectedStatuses: string[]): void {
+  if (!expectedStatuses.includes(response.status)) {
+    throw createRestError(
+      `Pagination failed with unexpected statusCode ${response.status}`,
+      response,
+    );
+  }
+}
diff --git a/sdk/search/search-documents/generated/static-helpers/serialization/serialize-record.ts b/sdk/search/search-documents/generated/static-helpers/serialization/serialize-record.ts
new file mode 100644
index 000000000000..f2d3a221fef7
--- /dev/null
+++ b/sdk/search/search-documents/generated/static-helpers/serialization/serialize-record.ts
@@ -0,0 +1,18 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+export function serializeRecord(item: any, excludes?: string[], serializer?: (item: any) => any) {
+  excludes = excludes ?? [];
+  const res: any = {};
+  for (const key of Object.keys(item)) {
+    if (excludes.includes(key) || item[key] === undefined) {
+      continue;
+    }
+    if (serializer) {
+      res[key] = serializer(item[key]);
+    } else {
+      res[key] = item[key] as any;
+    }
+  }
+  return res;
+}
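`serializeRecord` is a shallow copy with key exclusions and an optional per-value transform. A quick sketch of its observable behavior (import path assumed for illustration):

```ts
import { serializeRecord } from "./serialize-record.js";

const model = {
  name: "hotels",
  etag: 'W/"abc"',
  createdOn: new Date(0),
  notSet: undefined,
};

// Excluded keys and undefined values are dropped; everything else is copied as-is.
serializeRecord(model, ["etag"]);
// => { name: "hotels", createdOn: Date(0) }   ("notSet" is undefined, so it is omitted)

// When a serializer is supplied, it is applied to every surviving value.
serializeRecord(model, ["etag"], (v) => (v instanceof Date ? v.toISOString() : v));
// => { name: "hotels", createdOn: "1970-01-01T00:00:00.000Z" }
```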
diff --git a/sdk/search/search-documents/generated/static-helpers/urlTemplate.ts b/sdk/search/search-documents/generated/static-helpers/urlTemplate.ts
new file mode 100644
index 000000000000..c7109898692a
--- /dev/null
+++ b/sdk/search/search-documents/generated/static-helpers/urlTemplate.ts
@@ -0,0 +1,227 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+// ---------------------
+// interfaces
+// ---------------------
+interface ValueOptions {
+  isFirst: boolean; // is first value in the expression
+  op?: string; // operator
+  varValue?: any; // variable value
+  varName?: string; // variable name
+  modifier?: string; // modifier, e.g. *
+  reserved?: boolean; // if true we'll keep reserved characters without encoding
+}
+
+export interface UrlTemplateOptions {
+  // if set to true, reserved characters will not be encoded
+  allowReserved?: boolean;
+}
+
+// ---------------------
+// helpers
+// ---------------------
+function encodeComponent(val: string, reserved?: boolean, op?: string): string {
+  return (reserved ?? op === "+") || op === "#"
+    ? encodeReservedComponent(val)
+    : encodeRFC3986URIComponent(val);
+}
+
+function encodeReservedComponent(str: string): string {
+  return str
+    .split(/(%[0-9A-Fa-f]{2})/g)
+    .map((part) => (!/%[0-9A-Fa-f]/.test(part) ? encodeURI(part) : part))
+    .join("");
+}
+
+function encodeRFC3986URIComponent(str: string): string {
+  return encodeURIComponent(str).replace(
+    /[!'()*]/g,
+    (c) => `%${c.charCodeAt(0).toString(16).toUpperCase()}`,
+  );
+}
+
+function isDefined(val: any): boolean {
+  return val !== undefined && val !== null;
+}
+
+function getNamedAndIfEmpty(op?: string): [boolean, string] {
+  return [!!op && [";", "?", "&"].includes(op), !!op && ["?", "&"].includes(op) ? "=" : ""];
+}
+
+function getFirstOrSep(op?: string, isFirst = false): string {
+  if (isFirst) {
+    return !op || op === "+" ? "" : op;
+  } else if (!op || op === "+" || op === "#") {
+    return ",";
+  } else if (op === "?") {
+    return "&";
+  } else {
+    return op;
+  }
+}
+
+function getExpandedValue(option: ValueOptions): string {
+  let isFirst = option.isFirst;
+  const { op, varName, varValue: value, reserved } = option;
+  const vals: string[] = [];
+  const [named, ifEmpty] = getNamedAndIfEmpty(op);
+
+  if (Array.isArray(value)) {
+    for (const val of value.filter(isDefined)) {
+      // prepare the following parts: separator, varName, value
+      vals.push(`${getFirstOrSep(op, isFirst)}`);
+      if (named && varName) {
+        vals.push(`${encodeURIComponent(varName)}`);
+        if (val === "") {
+          vals.push(ifEmpty);
+        } else {
+          vals.push("=");
+        }
+      }
+      vals.push(encodeComponent(val, reserved, op));
+      isFirst = false;
+    }
+  } else if (typeof value === "object") {
+    for (const key of Object.keys(value)) {
+      const val = value[key];
+      if (!isDefined(val)) {
+        continue;
+      }
+      // prepare the following parts: separator, key, value
+      vals.push(`${getFirstOrSep(op, isFirst)}`);
+      if (key) {
+        vals.push(`${encodeURIComponent(key)}`);
+        if (named && val === "") {
+          vals.push(ifEmpty);
+        } else {
+          vals.push("=");
+        }
+      }
+      vals.push(encodeComponent(val, reserved, op));
+      isFirst = false;
+    }
+  }
+  return vals.join("");
+}
+
+function getNonExpandedValue(option: ValueOptions): string | undefined {
+  const { op, varName, varValue: value, isFirst, reserved } = option;
+  const vals: string[] = [];
+  const first = getFirstOrSep(op, isFirst);
+  const [named, ifEmpty] = getNamedAndIfEmpty(op);
+  if (named && varName) {
+    vals.push(encodeComponent(varName, reserved, op));
+    if (value === "") {
+      if (!ifEmpty) {
+        vals.push(ifEmpty);
+      }
+      return !vals.join("") ? undefined : `${first}${vals.join("")}`;
+    }
+    vals.push("=");
+  }
+
+  const items = [];
+  if (Array.isArray(value)) {
+    for (const val of value.filter(isDefined)) {
+      items.push(encodeComponent(val, reserved, op));
+    }
+  } else if (typeof value === "object") {
+    for (const key of Object.keys(value)) {
+      if (!isDefined(value[key])) {
+        continue;
+      }
+      items.push(encodeRFC3986URIComponent(key));
+      items.push(encodeComponent(value[key], reserved, op));
+    }
+  }
+  vals.push(items.join(","));
+  return !vals.join(",") ? undefined : `${first}${vals.join("")}`;
+}
+
+function getVarValue(option: ValueOptions): string | undefined {
+  const { op, varName, modifier, isFirst, reserved, varValue: value } = option;
+
+  if (!isDefined(value)) {
+    return undefined;
+  } else if (["string", "number", "boolean"].includes(typeof value)) {
+    let val = value.toString();
+    const [named, ifEmpty] = getNamedAndIfEmpty(op);
+    const vals: string[] = [getFirstOrSep(op, isFirst)];
+    if (named && varName) {
+      // No need to encode varName considering it is already encoded
+      vals.push(varName);
+      if (val === "") {
+        vals.push(ifEmpty);
+      } else {
+        vals.push("=");
+      }
+    }
+    if (modifier && modifier !== "*") {
+      val = val.substring(0, parseInt(modifier, 10));
+    }
+    vals.push(encodeComponent(val, reserved, op));
+    return vals.join("");
+  } else if (modifier === "*") {
+    return getExpandedValue(option);
+  } else {
+    return getNonExpandedValue(option);
+  }
+}
+
+// ---------------------------------------------------------------------------------------------------
+// This is an implementation of RFC 6570 URI Template: https://datatracker.ietf.org/doc/html/rfc6570.
+// ---------------------------------------------------------------------------------------------------
+export function expandUrlTemplate(
+  template: string,
+  context: Record<string, any>,
+  option?: UrlTemplateOptions,
+): string {
+  const result = template.replace(/\{([^{}]+)\}|([^{}]+)/g, (_, expr, text) => {
+    if (!expr) {
+      return encodeReservedComponent(text);
+    }
+    let op;
+    if (["+", "#", ".", "/", ";", "?", "&"].includes(expr[0])) {
+      op = expr[0];
+      expr = expr.slice(1);
+    }
+    const varList = expr.split(/,/g);
+    const result = [];
+    for (const varSpec of varList) {
+      const varMatch = /([^:*]*)(?::(\d+)|(\*))?/.exec(varSpec);
+      if (!varMatch || !varMatch[1]) {
+        continue;
+      }
+      const varValue = getVarValue({
+        isFirst: result.length === 0,
+        op,
+        varValue: context[varMatch[1]],
+        varName: varMatch[1],
+        modifier: varMatch[2] || varMatch[3],
+        reserved: option?.allowReserved,
+      });
+      if (varValue) {
+        result.push(varValue);
+      }
+    }
+    return result.join("");
+  });
+
+  return normalizeUnreserved(result);
+}
+
+/**
+ * Normalize an expanded URI by decoding percent-encoded unreserved characters.
+ * RFC 3986 unreserved: "-" / "." / "~"
+ */
+function normalizeUnreserved(uri: string): string {
+  return uri.replace(/%([0-9A-Fa-f]{2})/g, (match, hex) => {
+    const char = String.fromCharCode(parseInt(hex, 16));
+    // Decode only if it's unreserved
+    if (/[\-.~]/.test(char)) {
+      return char;
+    }
+    return match; // leave other encodings intact
+  });
+}
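To make the RFC 6570 behavior concrete, a few expansions as this implementation produces them; the template and context values are illustrative, and the expected outputs follow from tracing the operator handling above:

```ts
import { expandUrlTemplate } from "./urlTemplate.js"; // assumed relative path

const context = {
  indexName: "hotels index",
  api_version: "2025-11-01-preview",
  keys: ["a", "b"],
};

// Simple expansion percent-encodes reserved characters in the value.
expandUrlTemplate("/indexes('{indexName}')", context);
// => "/indexes('hotels%20index')"

// The ? operator starts a named query-string expansion.
expandUrlTemplate("/docs{?api_version}", context);
// => "/docs?api_version=2025-11-01-preview"

// The * modifier explodes a list into repeated name=value pairs joined with &.
expandUrlTemplate("{?keys*}", context);
// => "?keys=a&keys=b"
```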
/ "~" + */ +function normalizeUnreserved(uri: string): string { + return uri.replace(/%([0-9A-Fa-f]{2})/g, (match, hex) => { + const char = String.fromCharCode(parseInt(hex, 16)); + // Decode only if it's unreserved + if (/[\-.~]/.test(char)) { + return char; + } + return match; // leave other encodings intact + }); +} diff --git a/sdk/search/search-documents/metadata.json b/sdk/search/search-documents/metadata.json new file mode 100644 index 000000000000..0eee7110dd67 --- /dev/null +++ b/sdk/search/search-documents/metadata.json @@ -0,0 +1,471 @@ +{ + "apiVersion": "2025-11-01-preview", + "emitterVersion": "0.46.1", + "crossLanguageDefinitions": { + "CrossLanguagePackageId": "Search", + "CrossLanguageDefinitionId": { + "@azure/search-documents!ErrorResponse:interface": "Azure.Core.Foundations.ErrorResponse", + "@azure/search-documents!ErrorDetail:interface": "Search.ErrorDetail", + "@azure/search-documents!ErrorAdditionalInfo:interface": "Search.ErrorAdditionalInfo", + "@azure/search-documents!SearchDocumentsResult:interface": "Search.SearchDocumentsResult", + "@azure/search-documents!FacetResult:interface": "Search.FacetResult", + "@azure/search-documents!QueryAnswerResult:interface": "Search.QueryAnswerResult", + "@azure/search-documents!DebugInfo:interface": "Search.DebugInfo", + "@azure/search-documents!QueryRewritesDebugInfo:interface": "Search.QueryRewritesDebugInfo", + "@azure/search-documents!QueryRewritesValuesDebugInfo:interface": "Search.QueryRewritesValuesDebugInfo", + "@azure/search-documents!SearchRequest:interface": "Search.SearchRequest", + "@azure/search-documents!VectorQuery:interface": "Search.VectorQuery", + "@azure/search-documents!VectorThreshold:interface": "Search.VectorThreshold", + "@azure/search-documents!VectorSimilarityThreshold:interface": "Search.VectorSimilarityThreshold", + "@azure/search-documents!SearchScoreThreshold:interface": "Search.SearchScoreThreshold", + "@azure/search-documents!VectorizedQuery:interface": "Search.VectorizedQuery", + "@azure/search-documents!VectorizableTextQuery:interface": "Search.VectorizableTextQuery", + "@azure/search-documents!VectorizableImageUrlQuery:interface": "Search.VectorizableImageUrlQuery", + "@azure/search-documents!VectorizableImageBinaryQuery:interface": "Search.VectorizableImageBinaryQuery", + "@azure/search-documents!HybridSearch:interface": "Search.HybridSearch", + "@azure/search-documents!SearchResult:interface": "Search.SearchResult", + "@azure/search-documents!QueryCaptionResult:interface": "Search.QueryCaptionResult", + "@azure/search-documents!DocumentDebugInfo:interface": "Search.DocumentDebugInfo", + "@azure/search-documents!SemanticDebugInfo:interface": "Search.SemanticDebugInfo", + "@azure/search-documents!QueryResultDocumentSemanticField:interface": "Search.QueryResultDocumentSemanticField", + "@azure/search-documents!QueryResultDocumentRerankerInput:interface": "Search.QueryResultDocumentRerankerInput", + "@azure/search-documents!VectorsDebugInfo:interface": "Search.VectorsDebugInfo", + "@azure/search-documents!QueryResultDocumentSubscores:interface": "Search.QueryResultDocumentSubscores", + "@azure/search-documents!TextResult:interface": "Search.TextResult", + "@azure/search-documents!SingleVectorFieldResult:interface": "Search.SingleVectorFieldResult", + "@azure/search-documents!QueryResultDocumentInnerHit:interface": "Search.QueryResultDocumentInnerHit", + "@azure/search-documents!SearchPostRequest:interface": "Customizations.SearchClient.searchPost.Request.anonymous", + 
"@azure/search-documents!LookupDocument:interface": "Search.LookupDocument", + "@azure/search-documents!SuggestDocumentsResult:interface": "Search.SuggestDocumentsResult", + "@azure/search-documents!SuggestResult:interface": "Search.SuggestResult", + "@azure/search-documents!SuggestPostRequest:interface": "Customizations.SearchClient.suggestPost.Request.anonymous", + "@azure/search-documents!IndexDocumentsBatch:interface": "Search.IndexBatch", + "@azure/search-documents!IndexAction:interface": "Search.IndexAction", + "@azure/search-documents!IndexDocumentsResult:interface": "Search.IndexDocumentsResult", + "@azure/search-documents!IndexingResult:interface": "Search.IndexingResult", + "@azure/search-documents!AutocompleteResult:interface": "Search.AutocompleteResult", + "@azure/search-documents!AutocompleteItem:interface": "Search.AutocompleteItem", + "@azure/search-documents!AutocompletePostRequest:interface": "Customizations.SearchClient.autocompletePost.Request.anonymous", + "@azure/search-documents!SynonymMap:interface": "Search.SynonymMap", + "@azure/search-documents!SearchResourceEncryptionKey:interface": "Search.SearchResourceEncryptionKey", + "@azure/search-documents!AzureActiveDirectoryApplicationCredentials:interface": "Search.AzureActiveDirectoryApplicationCredentials", + "@azure/search-documents!SearchIndexerDataIdentity:interface": "Search.SearchIndexerDataIdentity", + "@azure/search-documents!SearchIndexerDataNoneIdentity:interface": "Search.SearchIndexerDataNoneIdentity", + "@azure/search-documents!SearchIndexerDataUserAssignedIdentity:interface": "Search.SearchIndexerDataUserAssignedIdentity", + "@azure/search-documents!ListSynonymMapsResult:interface": "Search.ListSynonymMapsResult", + "@azure/search-documents!SearchIndex:interface": "Search.SearchIndex", + "@azure/search-documents!SearchField:interface": "Search.SearchField", + "@azure/search-documents!ScoringProfile:interface": "Search.ScoringProfile", + "@azure/search-documents!TextWeights:interface": "Search.TextWeights", + "@azure/search-documents!ScoringFunction:interface": "Search.ScoringFunction", + "@azure/search-documents!DistanceScoringFunction:interface": "Search.DistanceScoringFunction", + "@azure/search-documents!DistanceScoringParameters:interface": "Search.DistanceScoringParameters", + "@azure/search-documents!FreshnessScoringFunction:interface": "Search.FreshnessScoringFunction", + "@azure/search-documents!FreshnessScoringParameters:interface": "Search.FreshnessScoringParameters", + "@azure/search-documents!MagnitudeScoringFunction:interface": "Search.MagnitudeScoringFunction", + "@azure/search-documents!MagnitudeScoringParameters:interface": "Search.MagnitudeScoringParameters", + "@azure/search-documents!TagScoringFunction:interface": "Search.TagScoringFunction", + "@azure/search-documents!TagScoringParameters:interface": "Search.TagScoringParameters", + "@azure/search-documents!CorsOptions:interface": "Search.CorsOptions", + "@azure/search-documents!SearchSuggester:interface": "Search.SearchSuggester", + "@azure/search-documents!LexicalAnalyzer:interface": "Search.LexicalAnalyzer", + "@azure/search-documents!CustomAnalyzer:interface": "Search.CustomAnalyzer", + "@azure/search-documents!PatternAnalyzer:interface": "Search.PatternAnalyzer", + "@azure/search-documents!LuceneStandardAnalyzer:interface": "Search.LuceneStandardAnalyzer", + "@azure/search-documents!StopAnalyzer:interface": "Search.StopAnalyzer", + "@azure/search-documents!LexicalTokenizer:interface": "Search.LexicalTokenizer", + 
"@azure/search-documents!ClassicTokenizer:interface": "Search.ClassicTokenizer", + "@azure/search-documents!EdgeNGramTokenizer:interface": "Search.EdgeNGramTokenizer", + "@azure/search-documents!KeywordTokenizer:interface": "Search.KeywordTokenizer", + "@azure/search-documents!MicrosoftLanguageTokenizer:interface": "Search.MicrosoftLanguageTokenizer", + "@azure/search-documents!MicrosoftLanguageStemmingTokenizer:interface": "Search.MicrosoftLanguageStemmingTokenizer", + "@azure/search-documents!NGramTokenizer:interface": "Search.NGramTokenizer", + "@azure/search-documents!PathHierarchyTokenizer:interface": "Search.PathHierarchyTokenizer", + "@azure/search-documents!PatternTokenizer:interface": "Search.PatternTokenizer", + "@azure/search-documents!LuceneStandardTokenizer:interface": "Search.LuceneStandardTokenizer", + "@azure/search-documents!UaxUrlEmailTokenizer:interface": "Search.UaxUrlEmailTokenizer", + "@azure/search-documents!TokenFilter:interface": "Search.TokenFilter", + "@azure/search-documents!AsciiFoldingTokenFilter:interface": "Search.AsciiFoldingTokenFilter", + "@azure/search-documents!CjkBigramTokenFilter:interface": "Search.CjkBigramTokenFilter", + "@azure/search-documents!CommonGramTokenFilter:interface": "Search.CommonGramTokenFilter", + "@azure/search-documents!DictionaryDecompounderTokenFilter:interface": "Search.DictionaryDecompounderTokenFilter", + "@azure/search-documents!EdgeNGramTokenFilter:interface": "Search.EdgeNGramTokenFilter", + "@azure/search-documents!ElisionTokenFilter:interface": "Search.ElisionTokenFilter", + "@azure/search-documents!KeepTokenFilter:interface": "Search.KeepTokenFilter", + "@azure/search-documents!KeywordMarkerTokenFilter:interface": "Search.KeywordMarkerTokenFilter", + "@azure/search-documents!LengthTokenFilter:interface": "Search.LengthTokenFilter", + "@azure/search-documents!LimitTokenFilter:interface": "Search.LimitTokenFilter", + "@azure/search-documents!NGramTokenFilter:interface": "Search.NGramTokenFilter", + "@azure/search-documents!PatternCaptureTokenFilter:interface": "Search.PatternCaptureTokenFilter", + "@azure/search-documents!PatternReplaceTokenFilter:interface": "Search.PatternReplaceTokenFilter", + "@azure/search-documents!PhoneticTokenFilter:interface": "Search.PhoneticTokenFilter", + "@azure/search-documents!ShingleTokenFilter:interface": "Search.ShingleTokenFilter", + "@azure/search-documents!SnowballTokenFilter:interface": "Search.SnowballTokenFilter", + "@azure/search-documents!StemmerTokenFilter:interface": "Search.StemmerTokenFilter", + "@azure/search-documents!StemmerOverrideTokenFilter:interface": "Search.StemmerOverrideTokenFilter", + "@azure/search-documents!StopwordsTokenFilter:interface": "Search.StopwordsTokenFilter", + "@azure/search-documents!SynonymTokenFilter:interface": "Search.SynonymTokenFilter", + "@azure/search-documents!TruncateTokenFilter:interface": "Search.TruncateTokenFilter", + "@azure/search-documents!UniqueTokenFilter:interface": "Search.UniqueTokenFilter", + "@azure/search-documents!WordDelimiterTokenFilter:interface": "Search.WordDelimiterTokenFilter", + "@azure/search-documents!CharFilter:interface": "Search.CharFilter", + "@azure/search-documents!MappingCharFilter:interface": "Search.MappingCharFilter", + "@azure/search-documents!PatternReplaceCharFilter:interface": "Search.PatternReplaceCharFilter", + "@azure/search-documents!LexicalNormalizer:interface": "Search.LexicalNormalizer", + "@azure/search-documents!CustomNormalizer:interface": "Search.CustomNormalizer", + 
"@azure/search-documents!SimilarityAlgorithm:interface": "Search.SimilarityAlgorithm", + "@azure/search-documents!ClassicSimilarity:interface": "Search.ClassicSimilarityAlgorithm", + "@azure/search-documents!BM25Similarity:interface": "Search.BM25SimilarityAlgorithm", + "@azure/search-documents!SemanticSearch:interface": "Search.SemanticSearch", + "@azure/search-documents!SemanticConfiguration:interface": "Search.SemanticConfiguration", + "@azure/search-documents!SemanticPrioritizedFields:interface": "Search.SemanticPrioritizedFields", + "@azure/search-documents!SemanticField:interface": "Search.SemanticField", + "@azure/search-documents!VectorSearch:interface": "Search.VectorSearch", + "@azure/search-documents!VectorSearchProfile:interface": "Search.VectorSearchProfile", + "@azure/search-documents!VectorSearchAlgorithmConfiguration:interface": "Search.VectorSearchAlgorithmConfiguration", + "@azure/search-documents!HnswAlgorithmConfiguration:interface": "Search.HnswAlgorithmConfiguration", + "@azure/search-documents!HnswParameters:interface": "Search.HnswParameters", + "@azure/search-documents!ExhaustiveKnnAlgorithmConfiguration:interface": "Search.ExhaustiveKnnAlgorithmConfiguration", + "@azure/search-documents!ExhaustiveKnnParameters:interface": "Search.ExhaustiveKnnParameters", + "@azure/search-documents!VectorSearchVectorizer:interface": "Search.VectorSearchVectorizer", + "@azure/search-documents!AzureOpenAIVectorizer:interface": "Search.AzureOpenAIVectorizer", + "@azure/search-documents!AzureOpenAIVectorizerParameters:interface": "Search.AzureOpenAIVectorizerParameters", + "@azure/search-documents!WebApiVectorizer:interface": "Search.WebApiVectorizer", + "@azure/search-documents!WebApiVectorizerParameters:interface": "Search.WebApiVectorizerParameters", + "@azure/search-documents!AIServicesVisionVectorizer:interface": "Search.AIServicesVisionVectorizer", + "@azure/search-documents!AIServicesVisionParameters:interface": "Search.AIServicesVisionParameters", + "@azure/search-documents!AzureMachineLearningVectorizer:interface": "Search.AMLVectorizer", + "@azure/search-documents!AzureMachineLearningParameters:interface": "Search.AMLParameters", + "@azure/search-documents!VectorSearchCompression:interface": "Search.VectorSearchCompression", + "@azure/search-documents!RescoringOptions:interface": "Search.RescoringOptions", + "@azure/search-documents!ScalarQuantizationCompression:interface": "Search.ScalarQuantizationCompression", + "@azure/search-documents!ScalarQuantizationParameters:interface": "Search.ScalarQuantizationParameters", + "@azure/search-documents!BinaryQuantizationCompression:interface": "Search.BinaryQuantizationCompression", + "@azure/search-documents!ListIndexesResult:interface": "Search.ListIndexesResult", + "@azure/search-documents!GetIndexStatisticsResult:interface": "Search.GetIndexStatisticsResult", + "@azure/search-documents!AnalyzeTextOptions:interface": "Search.AnalyzeRequest", + "@azure/search-documents!AnalyzeResult:interface": "Search.AnalyzeResult", + "@azure/search-documents!AnalyzedTokenInfo:interface": "Search.AnalyzedTokenInfo", + "@azure/search-documents!SearchAlias:interface": "Search.SearchAlias", + "@azure/search-documents!ListAliasesResult:interface": "Search.ListAliasesResult", + "@azure/search-documents!KnowledgeBase:interface": "Search.KnowledgeBase", + "@azure/search-documents!KnowledgeSourceReference:interface": "Search.KnowledgeSourceReference", + "@azure/search-documents!KnowledgeBaseModel:interface": "Search.KnowledgeBaseModel", + 
"@azure/search-documents!KnowledgeBaseAzureOpenAIModel:interface": "Search.KnowledgeBaseAzureOpenAIModel", + "@azure/search-documents!AzureOpenAiParameters:interface": "Search.AzureOpenAiParameters", + "@azure/search-documents!KnowledgeRetrievalReasoningEffort:interface": "Search.KnowledgeRetrievalReasoningEffort", + "@azure/search-documents!KnowledgeRetrievalMinimalReasoningEffort:interface": "Search.KnowledgeRetrievalMinimalReasoningEffort", + "@azure/search-documents!KnowledgeRetrievalLowReasoningEffort:interface": "Search.KnowledgeRetrievalLowReasoningEffort", + "@azure/search-documents!KnowledgeRetrievalMediumReasoningEffort:interface": "Search.KnowledgeRetrievalMediumReasoningEffort", + "@azure/search-documents!KnowledgeRetrievalHighReasoningEffort:interface": "Search.KnowledgeRetrievalHighReasoningEffort", + "@azure/search-documents!ListKnowledgeBasesResult:interface": "Search.ListKnowledgeBasesResult", + "@azure/search-documents!KnowledgeSource:interface": "Search.KnowledgeSource", + "@azure/search-documents!SearchIndexKnowledgeSource:interface": "Search.SearchIndexKnowledgeSource", + "@azure/search-documents!SearchIndexKnowledgeSourceParameters:interface": "Search.SearchIndexKnowledgeSourceParameters", + "@azure/search-documents!AzureBlobKnowledgeSource:interface": "Search.AzureBlobKnowledgeSource", + "@azure/search-documents!AzureBlobKnowledgeSourceParameters:interface": "Search.AzureBlobKnowledgeSourceParameters", + "@azure/search-documents!IndexingSchedule:interface": "Search.IndexingSchedule", + "@azure/search-documents!CreatedResources:interface": "Search.CreatedResources", + "@azure/search-documents!IndexedSharePointKnowledgeSource:interface": "Search.IndexedSharePointKnowledgeSource", + "@azure/search-documents!IndexedSharePointKnowledgeSourceParameters:interface": "Search.IndexedSharePointKnowledgeSourceParameters", + "@azure/search-documents!KnowledgeSourceIngestionParameters:interface": "Search.KnowledgeSourceIngestionParameters", + "@azure/search-documents!AIServices:interface": "Search.AIServices", + "@azure/search-documents!IndexedOneLakeKnowledgeSource:interface": "Search.IndexedOneLakeKnowledgeSource", + "@azure/search-documents!IndexedOneLakeKnowledgeSourceParameters:interface": "Search.IndexedOneLakeKnowledgeSourceParameters", + "@azure/search-documents!WebKnowledgeSource:interface": "Search.WebKnowledgeSource", + "@azure/search-documents!WebKnowledgeSourceParameters:interface": "Search.WebKnowledgeSourceParameters", + "@azure/search-documents!WebKnowledgeSourceDomains:interface": "Search.WebKnowledgeSourceDomains", + "@azure/search-documents!WebKnowledgeSourceDomain:interface": "Search.WebKnowledgeSourceDomain", + "@azure/search-documents!RemoteSharePointKnowledgeSource:interface": "Search.RemoteSharePointKnowledgeSource", + "@azure/search-documents!RemoteSharePointKnowledgeSourceParameters:interface": "Search.RemoteSharePointKnowledgeSourceParameters", + "@azure/search-documents!ListKnowledgeSourcesResult:interface": "Search.ListKnowledgeSourcesResult", + "@azure/search-documents!SearchServiceStatistics:interface": "Search.SearchServiceStatistics", + "@azure/search-documents!ServiceCounters:interface": "Search.SearchServiceCounters", + "@azure/search-documents!ResourceCounter:interface": "Search.ResourceCounter", + "@azure/search-documents!ServiceLimits:interface": "Search.SearchServiceLimits", + "@azure/search-documents!ServiceIndexersRuntime:interface": "Search.ServiceIndexersRuntime", + "@azure/search-documents!ListIndexStatsSummary:interface": 
"Search.ListIndexStatsSummary", + "@azure/search-documents!IndexStatisticsSummary:interface": "Search.IndexStatisticsSummary", + "@azure/search-documents!SearchIndexerDataSourceConnection:interface": "Search.SearchIndexerDataSource", + "@azure/search-documents!DataSourceCredentials:interface": "Search.DataSourceCredentials", + "@azure/search-documents!SearchIndexerDataContainer:interface": "Search.SearchIndexerDataContainer", + "@azure/search-documents!DataChangeDetectionPolicy:interface": "Search.DataChangeDetectionPolicy", + "@azure/search-documents!HighWaterMarkChangeDetectionPolicy:interface": "Search.HighWaterMarkChangeDetectionPolicy", + "@azure/search-documents!SqlIntegratedChangeTrackingPolicy:interface": "Search.SqlIntegratedChangeTrackingPolicy", + "@azure/search-documents!DataDeletionDetectionPolicy:interface": "Search.DataDeletionDetectionPolicy", + "@azure/search-documents!SoftDeleteColumnDeletionDetectionPolicy:interface": "Search.SoftDeleteColumnDeletionDetectionPolicy", + "@azure/search-documents!NativeBlobSoftDeleteDeletionDetectionPolicy:interface": "Search.NativeBlobSoftDeleteDeletionDetectionPolicy", + "@azure/search-documents!ListDataSourcesResult:interface": "Search.ListDataSourcesResult", + "@azure/search-documents!DocumentKeysOrIds:interface": "Search.DocumentKeysOrIds", + "@azure/search-documents!SearchIndexer:interface": "Search.SearchIndexer", + "@azure/search-documents!IndexingParameters:interface": "Search.IndexingParameters", + "@azure/search-documents!IndexingParametersConfiguration:interface": "Search.IndexingParametersConfiguration", + "@azure/search-documents!FieldMapping:interface": "Search.FieldMapping", + "@azure/search-documents!FieldMappingFunction:interface": "Search.FieldMappingFunction", + "@azure/search-documents!SearchIndexerCache:interface": "Search.SearchIndexerCache", + "@azure/search-documents!ListIndexersResult:interface": "Search.ListIndexersResult", + "@azure/search-documents!SearchIndexerStatus:interface": "Search.SearchIndexerStatus", + "@azure/search-documents!IndexerRuntime:interface": "Search.IndexerRuntime", + "@azure/search-documents!IndexerExecutionResult:interface": "Search.IndexerExecutionResult", + "@azure/search-documents!IndexerCurrentState:interface": "Search.IndexerCurrentState", + "@azure/search-documents!SearchIndexerError:interface": "Search.SearchIndexerError", + "@azure/search-documents!SearchIndexerWarning:interface": "Search.SearchIndexerWarning", + "@azure/search-documents!SearchIndexerLimits:interface": "Search.SearchIndexerLimits", + "@azure/search-documents!SearchIndexerSkillset:interface": "Search.SearchIndexerSkillset", + "@azure/search-documents!SearchIndexerSkill:interface": "Search.SearchIndexerSkill", + "@azure/search-documents!InputFieldMappingEntry:interface": "Search.InputFieldMappingEntry", + "@azure/search-documents!OutputFieldMappingEntry:interface": "Search.OutputFieldMappingEntry", + "@azure/search-documents!ConditionalSkill:interface": "Search.ConditionalSkill", + "@azure/search-documents!KeyPhraseExtractionSkill:interface": "Search.KeyPhraseExtractionSkill", + "@azure/search-documents!OcrSkill:interface": "Search.OcrSkill", + "@azure/search-documents!ImageAnalysisSkill:interface": "Search.ImageAnalysisSkill", + "@azure/search-documents!LanguageDetectionSkill:interface": "Search.LanguageDetectionSkill", + "@azure/search-documents!ShaperSkill:interface": "Search.ShaperSkill", + "@azure/search-documents!MergeSkill:interface": "Search.MergeSkill", + 
"@azure/search-documents!EntityRecognitionSkill:interface": "Search.EntityRecognitionSkill", + "@azure/search-documents!SentimentSkill:interface": "Search.SentimentSkill", + "@azure/search-documents!SentimentSkillV3:interface": "Search.SentimentSkillV3", + "@azure/search-documents!EntityLinkingSkill:interface": "Search.EntityLinkingSkill", + "@azure/search-documents!EntityRecognitionSkillV3:interface": "Search.EntityRecognitionSkillV3", + "@azure/search-documents!PIIDetectionSkill:interface": "Search.PIIDetectionSkill", + "@azure/search-documents!SplitSkill:interface": "Search.SplitSkill", + "@azure/search-documents!AzureOpenAITokenizerParameters:interface": "Search.AzureOpenAITokenizerParameters", + "@azure/search-documents!CustomEntityLookupSkill:interface": "Search.CustomEntityLookupSkill", + "@azure/search-documents!CustomEntity:interface": "Search.CustomEntity", + "@azure/search-documents!CustomEntityAlias:interface": "Search.CustomEntityAlias", + "@azure/search-documents!TextTranslationSkill:interface": "Search.TextTranslationSkill", + "@azure/search-documents!DocumentExtractionSkill:interface": "Search.DocumentExtractionSkill", + "@azure/search-documents!DocumentIntelligenceLayoutSkill:interface": "Search.DocumentIntelligenceLayoutSkill", + "@azure/search-documents!DocumentIntelligenceLayoutSkillChunkingProperties:interface": "Search.DocumentIntelligenceLayoutSkillChunkingProperties", + "@azure/search-documents!WebApiSkill:interface": "Search.WebApiSkill", + "@azure/search-documents!AzureMachineLearningSkill:interface": "Search.AzureMachineLearningSkill", + "@azure/search-documents!AzureOpenAIEmbeddingSkill:interface": "Search.AzureOpenAIEmbeddingSkill", + "@azure/search-documents!VisionVectorizeSkill:interface": "Search.VisionVectorizeSkill", + "@azure/search-documents!ContentUnderstandingSkill:interface": "Search.ContentUnderstandingSkill", + "@azure/search-documents!ContentUnderstandingSkillChunkingProperties:interface": "Search.ContentUnderstandingSkillChunkingProperties", + "@azure/search-documents!ChatCompletionSkill:interface": "Search.ChatCompletionSkill", + "@azure/search-documents!WebApiHttpHeaders:interface": "Search.WebApiHttpHeaders", + "@azure/search-documents!CommonModelParameters:interface": "Search.ChatCompletionCommonModelParameters", + "@azure/search-documents!ChatCompletionResponseFormat:interface": "Search.ChatCompletionResponseFormat", + "@azure/search-documents!ChatCompletionSchemaProperties:interface": "Search.ChatCompletionSchemaProperties", + "@azure/search-documents!ChatCompletionSchema:interface": "Search.ChatCompletionSchema", + "@azure/search-documents!CognitiveServicesAccount:interface": "Search.CognitiveServicesAccount", + "@azure/search-documents!DefaultCognitiveServicesAccount:interface": "Search.DefaultCognitiveServicesAccount", + "@azure/search-documents!CognitiveServicesAccountKey:interface": "Search.CognitiveServicesAccountKey", + "@azure/search-documents!AIServicesAccountKey:interface": "Search.AIServicesAccountKey", + "@azure/search-documents!AIServicesAccountIdentity:interface": "Search.AIServicesAccountIdentity", + "@azure/search-documents!SearchIndexerKnowledgeStore:interface": "Search.SearchIndexerKnowledgeStore", + "@azure/search-documents!SearchIndexerKnowledgeStoreProjection:interface": "Search.SearchIndexerKnowledgeStoreProjection", + "@azure/search-documents!SearchIndexerKnowledgeStoreTableProjectionSelector:interface": "Search.SearchIndexerKnowledgeStoreTableProjectionSelector", + 
"@azure/search-documents!SearchIndexerKnowledgeStoreProjectionSelector:interface": "Search.SearchIndexerKnowledgeStoreProjectionSelector", + "@azure/search-documents!SearchIndexerKnowledgeStoreObjectProjectionSelector:interface": "Search.SearchIndexerKnowledgeStoreObjectProjectionSelector", + "@azure/search-documents!SearchIndexerKnowledgeStoreBlobProjectionSelector:interface": "Search.SearchIndexerKnowledgeStoreBlobProjectionSelector", + "@azure/search-documents!SearchIndexerKnowledgeStoreFileProjectionSelector:interface": "Search.SearchIndexerKnowledgeStoreFileProjectionSelector", + "@azure/search-documents!SearchIndexerKnowledgeStoreParameters:interface": "Search.SearchIndexerKnowledgeStoreParameters", + "@azure/search-documents!SearchIndexerIndexProjection:interface": "Search.SearchIndexerIndexProjection", + "@azure/search-documents!SearchIndexerIndexProjectionSelector:interface": "Search.SearchIndexerIndexProjectionSelector", + "@azure/search-documents!SearchIndexerIndexProjectionsParameters:interface": "Search.SearchIndexerIndexProjectionsParameters", + "@azure/search-documents!ListSkillsetsResult:interface": "Search.ListSkillsetsResult", + "@azure/search-documents!SkillNames:interface": "Search.SkillNames", + "@azure/search-documents!KnowledgeBaseRetrievalRequest:interface": "Search.KnowledgeBaseRetrievalRequest", + "@azure/search-documents!KnowledgeBaseMessage:interface": "Search.KnowledgeBaseMessage", + "@azure/search-documents!KnowledgeBaseMessageContent:interface": "Search.KnowledgeBaseMessageContent", + "@azure/search-documents!KnowledgeBaseMessageTextContent:interface": "Search.KnowledgeBaseMessageTextContent", + "@azure/search-documents!KnowledgeBaseMessageImageContent:interface": "Search.KnowledgeBaseMessageImageContent", + "@azure/search-documents!KnowledgeBaseImageContent:interface": "Search.KnowledgeBaseImageContent", + "@azure/search-documents!KnowledgeRetrievalIntent:interface": "Search.KnowledgeRetrievalIntent", + "@azure/search-documents!KnowledgeRetrievalSemanticIntent:interface": "Search.KnowledgeRetrievalSemanticIntent", + "@azure/search-documents!KnowledgeSourceParams:interface": "Search.KnowledgeSourceParams", + "@azure/search-documents!SearchIndexKnowledgeSourceParams:interface": "Search.SearchIndexKnowledgeSourceParams", + "@azure/search-documents!AzureBlobKnowledgeSourceParams:interface": "Search.AzureBlobKnowledgeSourceParams", + "@azure/search-documents!IndexedSharePointKnowledgeSourceParams:interface": "Search.IndexedSharePointKnowledgeSourceParams", + "@azure/search-documents!IndexedOneLakeKnowledgeSourceParams:interface": "Search.IndexedOneLakeKnowledgeSourceParams", + "@azure/search-documents!WebKnowledgeSourceParams:interface": "Search.WebKnowledgeSourceParams", + "@azure/search-documents!RemoteSharePointKnowledgeSourceParams:interface": "Search.RemoteSharePointKnowledgeSourceParams", + "@azure/search-documents!KnowledgeBaseRetrievalResponse:interface": "Search.KnowledgeBaseRetrievalResponse", + "@azure/search-documents!KnowledgeBaseActivityRecord:interface": "Search.KnowledgeBaseActivityRecord", + "@azure/search-documents!KnowledgeBaseErrorDetail:interface": "Search.KnowledgeBaseErrorDetail", + "@azure/search-documents!KnowledgeBaseErrorAdditionalInfo:interface": "Search.KnowledgeBaseErrorAdditionalInfo", + "@azure/search-documents!KnowledgeBaseModelQueryPlanningActivityRecord:interface": "Search.KnowledgeBaseModelQueryPlanningActivityRecord", + "@azure/search-documents!KnowledgeBaseModelAnswerSynthesisActivityRecord:interface": 
"Search.KnowledgeBaseModelAnswerSynthesisActivityRecord", + "@azure/search-documents!KnowledgeBaseAgenticReasoningActivityRecord:interface": "Search.KnowledgeBaseAgenticReasoningActivityRecord", + "@azure/search-documents!KnowledgeBaseReference:interface": "Search.KnowledgeBaseReference", + "@azure/search-documents!KnowledgeBaseSearchIndexReference:interface": "Search.KnowledgeBaseSearchIndexReference", + "@azure/search-documents!KnowledgeBaseAzureBlobReference:interface": "Search.KnowledgeBaseAzureBlobReference", + "@azure/search-documents!KnowledgeBaseIndexedSharePointReference:interface": "Search.KnowledgeBaseIndexedSharePointReference", + "@azure/search-documents!KnowledgeBaseIndexedOneLakeReference:interface": "Search.KnowledgeBaseIndexedOneLakeReference", + "@azure/search-documents!KnowledgeBaseWebReference:interface": "Search.KnowledgeBaseWebReference", + "@azure/search-documents!KnowledgeBaseRemoteSharePointReference:interface": "Search.KnowledgeBaseRemoteSharePointReference", + "@azure/search-documents!SharePointSensitivityLabelInfo:interface": "Search.SharePointSensitivityLabelInfo", + "@azure/search-documents!Error:interface": "Azure.Core.Foundations.Error", + "@azure/search-documents!InnerError:interface": "Azure.Core.Foundations.InnerError", + "@azure/search-documents!IndexerResyncBody:interface": "Search.IndexerResyncBody", + "@azure/search-documents!KnowledgeSourceStatus:interface": "Search.KnowledgeSourceStatus", + "@azure/search-documents!SynchronizationState:interface": "Search.SynchronizationState", + "@azure/search-documents!CompletedSynchronizationState:interface": "Search.CompletedSynchronizationState", + "@azure/search-documents!KnowledgeSourceStatistics:interface": "Search.KnowledgeSourceStatistics", + "@azure/search-documents!KnownQueryType:enum": "Search.QueryType", + "@azure/search-documents!KnownSearchMode:enum": "Search.SearchMode", + "@azure/search-documents!KnownScoringStatistics:enum": "Search.ScoringStatistics", + "@azure/search-documents!KnownSemanticErrorMode:enum": "Search.SemanticErrorMode", + "@azure/search-documents!KnownQueryAnswerType:enum": "Search.QueryAnswerType", + "@azure/search-documents!KnownQueryCaptionType:enum": "Search.QueryCaptionType", + "@azure/search-documents!KnownQueryRewritesType:enum": "Search.QueryRewritesType", + "@azure/search-documents!KnownQueryDebugMode:enum": "Search.QueryDebugMode", + "@azure/search-documents!KnownQueryLanguage:enum": "Search.QueryLanguage", + "@azure/search-documents!KnownQuerySpellerType:enum": "Search.QuerySpellerType", + "@azure/search-documents!KnownVectorThresholdKind:enum": "Search.VectorThresholdKind", + "@azure/search-documents!KnownVectorQueryKind:enum": "Search.VectorQueryKind", + "@azure/search-documents!KnownVectorFilterMode:enum": "Search.VectorFilterMode", + "@azure/search-documents!KnownHybridCountAndFacetMode:enum": "Search.HybridCountAndFacetMode", + "@azure/search-documents!KnownSemanticFieldState:enum": "Search.SemanticFieldState", + "@azure/search-documents!KnownSemanticErrorReason:enum": "Search.SemanticErrorReason", + "@azure/search-documents!KnownSemanticSearchResultsType:enum": "Search.SemanticSearchResultsType", + "@azure/search-documents!KnownSemanticQueryRewritesResultType:enum": "Search.SemanticQueryRewritesResultType", + "@azure/search-documents!KnownIndexActionType:enum": "Search.IndexActionType", + "@azure/search-documents!KnownAutocompleteMode:enum": "Search.AutocompleteMode", + "@azure/search-documents!KnownVersions:enum": "Search.Versions", + 
"@azure/search-documents!KnownSearchFieldDataType:enum": "Search.SearchFieldDataType", + "@azure/search-documents!KnownPermissionFilter:enum": "Search.PermissionFilter", + "@azure/search-documents!KnownLexicalAnalyzerName:enum": "Search.LexicalAnalyzerName", + "@azure/search-documents!KnownLexicalNormalizerName:enum": "Search.LexicalNormalizerName", + "@azure/search-documents!KnownVectorEncodingFormat:enum": "Search.VectorEncodingFormat", + "@azure/search-documents!KnownScoringFunctionInterpolation:enum": "Search.ScoringFunctionInterpolation", + "@azure/search-documents!KnownScoringFunctionAggregation:enum": "Search.ScoringFunctionAggregation", + "@azure/search-documents!KnownLexicalTokenizerName:enum": "Search.LexicalTokenizerName", + "@azure/search-documents!KnownTokenFilterName:enum": "Search.TokenFilterName", + "@azure/search-documents!KnownCharFilterName:enum": "Search.CharFilterName", + "@azure/search-documents!KnownRegexFlags:enum": "Search.RegexFlags", + "@azure/search-documents!KnownTokenCharacterKind:enum": "Search.TokenCharacterKind", + "@azure/search-documents!KnownMicrosoftTokenizerLanguage:enum": "Search.MicrosoftTokenizerLanguage", + "@azure/search-documents!KnownMicrosoftStemmingTokenizerLanguage:enum": "Search.MicrosoftStemmingTokenizerLanguage", + "@azure/search-documents!KnownCjkBigramTokenFilterScripts:enum": "Search.CjkBigramTokenFilterScripts", + "@azure/search-documents!KnownEdgeNGramTokenFilterSide:enum": "Search.EdgeNGramTokenFilterSide", + "@azure/search-documents!KnownPhoneticEncoder:enum": "Search.PhoneticEncoder", + "@azure/search-documents!KnownSnowballTokenFilterLanguage:enum": "Search.SnowballTokenFilterLanguage", + "@azure/search-documents!KnownStemmerTokenFilterLanguage:enum": "Search.StemmerTokenFilterLanguage", + "@azure/search-documents!KnownStopwordsList:enum": "Search.StopwordsList", + "@azure/search-documents!KnownRankingOrder:enum": "Search.RankingOrder", + "@azure/search-documents!KnownVectorSearchAlgorithmKind:enum": "Search.VectorSearchAlgorithmKind", + "@azure/search-documents!KnownVectorSearchAlgorithmMetric:enum": "Search.VectorSearchAlgorithmMetric", + "@azure/search-documents!KnownVectorSearchVectorizerKind:enum": "Search.VectorSearchVectorizerKind", + "@azure/search-documents!KnownAzureOpenAIModelName:enum": "Search.AzureOpenAIModelName", + "@azure/search-documents!KnownAIFoundryModelCatalogName:enum": "Search.AIFoundryModelCatalogName", + "@azure/search-documents!KnownVectorSearchCompressionRescoreStorageMethod:enum": "Search.VectorSearchCompressionRescoreStorageMethod", + "@azure/search-documents!KnownVectorSearchCompressionKind:enum": "Search.VectorSearchCompressionKind", + "@azure/search-documents!KnownVectorSearchCompressionTarget:enum": "Search.VectorSearchCompressionTarget", + "@azure/search-documents!KnownSearchIndexPermissionFilterOption:enum": "Search.SearchIndexPermissionFilterOption", + "@azure/search-documents!KnownKnowledgeBaseModelKind:enum": "Search.KnowledgeBaseModelKind", + "@azure/search-documents!KnownKnowledgeRetrievalReasoningEffortKind:enum": "Search.KnowledgeRetrievalReasoningEffortKind", + "@azure/search-documents!KnownKnowledgeRetrievalOutputMode:enum": "Search.KnowledgeRetrievalOutputMode", + "@azure/search-documents!KnownKnowledgeSourceKind:enum": "Search.KnowledgeSourceKind", + "@azure/search-documents!KnownBlobIndexerDataToExtract:enum": "Search.BlobIndexerDataToExtract", + "@azure/search-documents!KnownBlobIndexerImageAction:enum": "Search.BlobIndexerImageAction", + 
"@azure/search-documents!KnownBlobIndexerParsingMode:enum": "Search.BlobIndexerParsingMode", + "@azure/search-documents!KnownMarkdownHeaderDepth:enum": "Search.MarkdownHeaderDepth", + "@azure/search-documents!KnownMarkdownParsingSubmode:enum": "Search.MarkdownParsingSubmode", + "@azure/search-documents!KnownBlobIndexerPDFTextRotationAlgorithm:enum": "Search.BlobIndexerPDFTextRotationAlgorithm", + "@azure/search-documents!KnownKnowledgeSourceIngestionPermissionOption:enum": "Search.KnowledgeSourceIngestionPermissionOption", + "@azure/search-documents!KnownKnowledgeSourceContentExtractionMode:enum": "Search.KnowledgeSourceContentExtractionMode", + "@azure/search-documents!KnownSearchIndexerDataSourceType:enum": "Search.SearchIndexerDataSourceType", + "@azure/search-documents!KnownIndexerPermissionOption:enum": "Search.IndexerPermissionOption", + "@azure/search-documents!KnownIndexerExecutionEnvironment:enum": "Search.IndexerExecutionEnvironment", + "@azure/search-documents!KnownIndexerStatus:enum": "Search.IndexerStatus", + "@azure/search-documents!KnownIndexerExecutionStatus:enum": "Search.IndexerExecutionStatus", + "@azure/search-documents!KnownIndexerExecutionStatusDetail:enum": "Search.IndexerExecutionStatusDetail", + "@azure/search-documents!KnownIndexingMode:enum": "Search.IndexingMode", + "@azure/search-documents!KnownKeyPhraseExtractionSkillLanguage:enum": "Search.KeyPhraseExtractionSkillLanguage", + "@azure/search-documents!KnownOcrSkillLanguage:enum": "Search.OcrSkillLanguage", + "@azure/search-documents!KnownOcrLineEnding:enum": "Search.OcrLineEnding", + "@azure/search-documents!KnownImageAnalysisSkillLanguage:enum": "Search.ImageAnalysisSkillLanguage", + "@azure/search-documents!KnownVisualFeature:enum": "Search.VisualFeature", + "@azure/search-documents!KnownImageDetail:enum": "Search.ImageDetail", + "@azure/search-documents!KnownEntityCategory:enum": "Search.EntityCategory", + "@azure/search-documents!KnownEntityRecognitionSkillLanguage:enum": "Search.EntityRecognitionSkillLanguage", + "@azure/search-documents!KnownSentimentSkillLanguage:enum": "Search.SentimentSkillLanguage", + "@azure/search-documents!KnownPIIDetectionSkillMaskingMode:enum": "Search.PIIDetectionSkillMaskingMode", + "@azure/search-documents!KnownSplitSkillLanguage:enum": "Search.SplitSkillLanguage", + "@azure/search-documents!KnownTextSplitMode:enum": "Search.TextSplitMode", + "@azure/search-documents!KnownSplitSkillUnit:enum": "Search.SplitSkillUnit", + "@azure/search-documents!KnownSplitSkillEncoderModelName:enum": "Search.SplitSkillEncoderModelName", + "@azure/search-documents!KnownCustomEntityLookupSkillLanguage:enum": "Search.CustomEntityLookupSkillLanguage", + "@azure/search-documents!KnownTextTranslationSkillLanguage:enum": "Search.TextTranslationSkillLanguage", + "@azure/search-documents!KnownDocumentIntelligenceLayoutSkillOutputFormat:enum": "Search.DocumentIntelligenceLayoutSkillOutputFormat", + "@azure/search-documents!KnownDocumentIntelligenceLayoutSkillOutputMode:enum": "Search.DocumentIntelligenceLayoutSkillOutputMode", + "@azure/search-documents!KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth:enum": "Search.DocumentIntelligenceLayoutSkillMarkdownHeaderDepth", + "@azure/search-documents!KnownDocumentIntelligenceLayoutSkillExtractionOptions:enum": "Search.DocumentIntelligenceLayoutSkillExtractionOptions", + "@azure/search-documents!KnownDocumentIntelligenceLayoutSkillChunkingUnit:enum": "Search.DocumentIntelligenceLayoutSkillChunkingUnit", + 
"@azure/search-documents!KnownContentUnderstandingSkillExtractionOptions:enum": "Search.ContentUnderstandingSkillExtractionOptions", + "@azure/search-documents!KnownContentUnderstandingSkillChunkingUnit:enum": "Search.ContentUnderstandingSkillChunkingUnit", + "@azure/search-documents!KnownChatCompletionExtraParametersBehavior:enum": "Search.ChatCompletionExtraParametersBehavior", + "@azure/search-documents!KnownChatCompletionResponseFormatType:enum": "Search.ChatCompletionResponseFormatType", + "@azure/search-documents!KnownIndexProjectionMode:enum": "Search.IndexProjectionMode", + "@azure/search-documents!KnownKnowledgeBaseMessageContentType:enum": "Search.KnowledgeBaseMessageContentType", + "@azure/search-documents!KnownKnowledgeRetrievalIntentType:enum": "Search.KnowledgeRetrievalIntentType", + "@azure/search-documents!KnownKnowledgeBaseActivityRecordType:enum": "Search.KnowledgeBaseActivityRecordType", + "@azure/search-documents!KnownKnowledgeBaseReferenceType:enum": "Search.KnowledgeBaseReferenceType", + "@azure/search-documents!KnownIndexerResyncOption:enum": "Search.IndexerResyncOption", + "@azure/search-documents!KnownKnowledgeSourceSynchronizationStatus:enum": "Search.KnowledgeSourceSynchronizationStatus", + "@azure/search-documents!SearchClient#autocompletePost:member(1)": "Customizations.SearchClient.Documents.autocompletePost", + "@azure/search-documents!SearchClient#autocompleteGet:member(1)": "Customizations.SearchClient.Documents.autocompleteGet", + "@azure/search-documents!SearchClient#index:member(1)": "Customizations.SearchClient.Documents.index", + "@azure/search-documents!SearchClient#suggestPost:member(1)": "Customizations.SearchClient.Documents.suggestPost", + "@azure/search-documents!SearchClient#suggestGet:member(1)": "Customizations.SearchClient.Documents.suggestGet", + "@azure/search-documents!SearchClient#getDocument:member(1)": "Customizations.SearchClient.Documents.get", + "@azure/search-documents!SearchClient#searchPost:member(1)": "Customizations.SearchClient.Documents.searchPost", + "@azure/search-documents!SearchClient#searchGet:member(1)": "Customizations.SearchClient.Documents.searchGet", + "@azure/search-documents!SearchClient#getDocumentCount:member(1)": "Customizations.SearchClient.Documents.count", + "@azure/search-documents!SearchIndexClient#listIndexStatsSummary:member(1)": "Customizations.SearchIndexClient.Root.getIndexStatsSummary", + "@azure/search-documents!SearchIndexClient#getServiceStatistics:member(1)": "Customizations.SearchIndexClient.Root.getServiceStatistics", + "@azure/search-documents!SearchIndexClient#createKnowledgeSource:member(1)": "Customizations.SearchIndexClient.Sources.create", + "@azure/search-documents!SearchIndexClient#listKnowledgeSources:member(1)": "Customizations.SearchIndexClient.Sources.list", + "@azure/search-documents!SearchIndexClient#getKnowledgeSource:member(1)": "Customizations.SearchIndexClient.Sources.get", + "@azure/search-documents!SearchIndexClient#deleteKnowledgeSource:member(1)": "Customizations.SearchIndexClient.Sources.delete", + "@azure/search-documents!SearchIndexClient#createOrUpdateKnowledgeSource:member(1)": "Customizations.SearchIndexClient.Sources.createOrUpdate", + "@azure/search-documents!SearchIndexClient#createKnowledgeBase:member(1)": "Customizations.SearchIndexClient.KnowledgeBases.create", + "@azure/search-documents!SearchIndexClient#listKnowledgeBases:member(1)": "Customizations.SearchIndexClient.KnowledgeBases.list", + 
"@azure/search-documents!SearchIndexClient#getKnowledgeBase:member(1)": "Customizations.SearchIndexClient.KnowledgeBases.get", + "@azure/search-documents!SearchIndexClient#deleteKnowledgeBase:member(1)": "Customizations.SearchIndexClient.KnowledgeBases.delete", + "@azure/search-documents!SearchIndexClient#createOrUpdateKnowledgeBase:member(1)": "Customizations.SearchIndexClient.KnowledgeBases.createOrUpdate", + "@azure/search-documents!SearchIndexClient#createAlias:member(1)": "Customizations.SearchIndexClient.Aliases.create", + "@azure/search-documents!SearchIndexClient#listAliases:member(1)": "Customizations.SearchIndexClient.Aliases.list", + "@azure/search-documents!SearchIndexClient#getAlias:member(1)": "Customizations.SearchIndexClient.Aliases.get", + "@azure/search-documents!SearchIndexClient#deleteAlias:member(1)": "Customizations.SearchIndexClient.Aliases.delete", + "@azure/search-documents!SearchIndexClient#createOrUpdateAlias:member(1)": "Customizations.SearchIndexClient.Aliases.createOrUpdate", + "@azure/search-documents!SearchIndexClient#analyzeText:member(1)": "Customizations.SearchIndexClient.Indexes.analyze", + "@azure/search-documents!SearchIndexClient#getIndexStatistics:member(1)": "Customizations.SearchIndexClient.Indexes.getStatistics", + "@azure/search-documents!SearchIndexClient#createIndex:member(1)": "Customizations.SearchIndexClient.Indexes.create", + "@azure/search-documents!SearchIndexClient#listIndexes:member(1)": "Customizations.SearchIndexClient.Indexes.list", + "@azure/search-documents!SearchIndexClient#getIndex:member(1)": "Customizations.SearchIndexClient.Indexes.get", + "@azure/search-documents!SearchIndexClient#deleteIndex:member(1)": "Customizations.SearchIndexClient.Indexes.delete", + "@azure/search-documents!SearchIndexClient#createOrUpdateIndex:member(1)": "Customizations.SearchIndexClient.Indexes.createOrUpdate", + "@azure/search-documents!SearchIndexClient#createSynonymMap:member(1)": "Customizations.SearchIndexClient.SynonymMaps.create", + "@azure/search-documents!SearchIndexClient#getSynonymMaps:member(1)": "Customizations.SearchIndexClient.SynonymMaps.list", + "@azure/search-documents!SearchIndexClient#getSynonymMap:member(1)": "Customizations.SearchIndexClient.SynonymMaps.get", + "@azure/search-documents!SearchIndexClient#deleteSynonymMap:member(1)": "Customizations.SearchIndexClient.SynonymMaps.delete", + "@azure/search-documents!SearchIndexClient#createOrUpdateSynonymMap:member(1)": "Customizations.SearchIndexClient.SynonymMaps.createOrUpdate", + "@azure/search-documents!SearchIndexerClient#resetSkills:member(1)": "Customizations.SearchIndexerClient.Skillsets.resetSkills", + "@azure/search-documents!SearchIndexerClient#createSkillset:member(1)": "Customizations.SearchIndexerClient.Skillsets.create", + "@azure/search-documents!SearchIndexerClient#getSkillsets:member(1)": "Customizations.SearchIndexerClient.Skillsets.list", + "@azure/search-documents!SearchIndexerClient#getSkillset:member(1)": "Customizations.SearchIndexerClient.Skillsets.get", + "@azure/search-documents!SearchIndexerClient#deleteSkillset:member(1)": "Customizations.SearchIndexerClient.Skillsets.delete", + "@azure/search-documents!SearchIndexerClient#createOrUpdateSkillset:member(1)": "Customizations.SearchIndexerClient.Skillsets.createOrUpdate", + "@azure/search-documents!SearchIndexerClient#getIndexerStatus:member(1)": "Customizations.SearchIndexerClient.Indexers.getStatus", + "@azure/search-documents!SearchIndexerClient#createIndexer:member(1)": 
"Customizations.SearchIndexerClient.Indexers.create", + "@azure/search-documents!SearchIndexerClient#getIndexers:member(1)": "Customizations.SearchIndexerClient.Indexers.list", + "@azure/search-documents!SearchIndexerClient#getIndexer:member(1)": "Customizations.SearchIndexerClient.Indexers.get", + "@azure/search-documents!SearchIndexerClient#deleteIndexer:member(1)": "Customizations.SearchIndexerClient.Indexers.delete", + "@azure/search-documents!SearchIndexerClient#createOrUpdateIndexer:member(1)": "Customizations.SearchIndexerClient.Indexers.createOrUpdate", + "@azure/search-documents!SearchIndexerClient#runIndexer:member(1)": "Customizations.SearchIndexerClient.Indexers.run", + "@azure/search-documents!SearchIndexerClient#resetDocuments:member(1)": "Customizations.SearchIndexerClient.Indexers.resetDocs", + "@azure/search-documents!SearchIndexerClient#resync:member(1)": "Customizations.SearchIndexerClient.Indexers.resync", + "@azure/search-documents!SearchIndexerClient#resetIndexer:member(1)": "Customizations.SearchIndexerClient.Indexers.reset", + "@azure/search-documents!SearchIndexerClient#createDataSourceConnection:member(1)": "Customizations.SearchIndexerClient.DataSources.create", + "@azure/search-documents!SearchIndexerClient#getDataSourceConnections:member(1)": "Customizations.SearchIndexerClient.DataSources.list", + "@azure/search-documents!SearchIndexerClient#getDataSourceConnection:member(1)": "Customizations.SearchIndexerClient.DataSources.get", + "@azure/search-documents!SearchIndexerClient#deleteDataSourceConnection:member(1)": "Customizations.SearchIndexerClient.DataSources.delete", + "@azure/search-documents!SearchIndexerClient#createOrUpdateDataSourceConnection:member(1)": "Customizations.SearchIndexerClient.DataSources.createOrUpdate", + "@azure/search-documents!KnowledgeBaseRetrievalClient#retrieve:member(1)": "Customizations.KnowledgeBaseRetrievalClient.KnowledgeRetrieval.retrieve" + } + } +} diff --git a/sdk/search/search-documents/package.json b/sdk/search/search-documents/package.json index c1713c06e69a..1d3dac4e82e9 100644 --- a/sdk/search/search-documents/package.json +++ b/sdk/search/search-documents/package.json @@ -1,84 +1,93 @@ { "name": "@azure/search-documents", "version": "12.3.0-beta.1", - "description": "Azure client library to use AI Search for node.js and browser.", - "sdk-type": "client", - "main": "./dist/commonjs/index.js", - "module": "./dist/esm/index.js", - "types": "./dist/commonjs/index.d.ts", - "scripts": { - "build": "npm run clean && dev-tool run build-package && dev-tool run extract-api", - "build:samples": "tsc -p tsconfig.samples.json", - "check-format": "prettier --list-different --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.{ts,mts}\" \"test/**/*.{ts,mts}\" \"samples-dev/**/*.{ts,mts}\" \"*.{js,mjs,json}\"", - "clean": "rimraf --glob dist dist-* temp types *.tgz *.log", - "execute:samples": "dev-tool samples run samples-dev", - "extract-api": "dev-tool run build-package && dev-tool run extract-api", - "format": "prettier --write --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.{ts,mts}\" \"test/**/*.{ts,mts}\" \"samples-dev/**/*.{ts,mts}\" \"*.{js,mjs,json}\"", - "generate:client": "autorest --typescript swagger/Service.md & autorest --typescript swagger/Data.md & autorest --typescript swagger/KnowledgeBase.md & wait", - "generate:embeddings": "ts-node scripts/generateSampleEmbeddings.ts", - "lint": "eslint package.json src test samples-dev", - "lint:fix": 
"eslint package.json src test samples-dev --fix --fix-type [problem,suggestion]", - "pack": "pnpm pack 2>&1", - "test": "npm run test:node && npm run test:browser", - "test:browser": "npm run clean && dev-tool run build-package && dev-tool run build-test && dev-tool run test:vitest --browser", - "test:node": "dev-tool run test:vitest --test-proxy-debug", - "update-snippets": "dev-tool run update-snippets" + "description": "A generated SDK for KnowledgeBaseRetrievalClient.", + "engines": { + "node": ">=20.0.0" }, + "sideEffects": false, + "autoPublish": false, + "tshy": { + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts", + "./search": "./src/search/index.ts", + "./search/api": "./src/search/api/index.ts", + "./searchIndex": "./src/searchIndex/index.ts", + "./searchIndex/api": "./src/searchIndex/api/index.ts", + "./searchIndexer": "./src/searchIndexer/index.ts", + "./searchIndexer/api": "./src/searchIndexer/api/index.ts", + "./knowledgeBaseRetrieval": "./src/knowledgeBaseRetrieval/index.ts", + "./knowledgeBaseRetrieval/api": "./src/knowledgeBaseRetrieval/api/index.ts", + "./models": "./src/models/index.ts", + "./models/azure/search/documents": "./src/models/azure/search/documents/index.ts", + "./models/azure/search/documents/indexes": "./src/models/azure/search/documents/indexes/index.ts", + "./models/azure/search/documents/knowledgeBase": "./src/models/azure/search/documents/knowledgeBase/index.ts" + }, + "dialects": [ + "esm", + "commonjs" + ], + "esmDialects": [ + "browser", + "react-native" + ], + "selfLink": false, + "project": "../../../tsconfig.src.build.json" + }, + "type": "module", + "browser": "./dist/browser/index.js", + "react-native": "./dist/react-native/index.js", + "keywords": [ + "node", + "azure", + "cloud", + "typescript", + "browser", + "isomorphic" + ], + "author": "Microsoft Corporation", + "license": "MIT", "files": [ "dist/", + "!dist/**/*.d.*ts.map", "README.md", "LICENSE" ], - "browser": "./dist/browser/index.js", + "sdk-type": "client", + "repository": "github:Azure/azure-sdk-for-js", + "bugs": { + "url": "https://github.com/Azure/azure-sdk-for-js/issues" + }, + "homepage": "https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/search/search-documents/README.md", + "prettier": "@azure/eslint-plugin-azure-sdk/prettier.json", "//metadata": { "constantPaths": [ { - "path": "src/constants.ts", - "prefix": "SDK_VERSION" - }, - { - "path": "src/generated/data/searchClient.ts", - "prefix": "packageDetails" + "path": "/home/maorleger/workspace/azure-sdk-for-js/sdk/search/search-documents/generated/search/api/searchContext.ts", + "prefix": "userAgentInfo" }, { - "path": "src/generated/service/searchServiceClient.ts", - "prefix": "packageDetails" + "path": "/home/maorleger/workspace/azure-sdk-for-js/sdk/search/search-documents/generated/searchIndex/api/searchIndexContext.ts", + "prefix": "userAgentInfo" }, { - "path": "swagger/Data.md", - "prefix": "package-version" + "path": "/home/maorleger/workspace/azure-sdk-for-js/sdk/search/search-documents/generated/searchIndexer/api/searchIndexerContext.ts", + "prefix": "userAgentInfo" }, { - "path": "swagger/Service.md", - "prefix": "package-version" + "path": "/home/maorleger/workspace/azure-sdk-for-js/sdk/search/search-documents/generated/knowledgeBaseRetrieval/api/knowledgeBaseRetrievalContext.ts", + "prefix": "userAgentInfo" } ] }, - "repository": "github:Azure/azure-sdk-for-js", - "keywords": [ - "azure", - "cloud" - ], - "author": "Microsoft Corporation", - "license": "MIT", - "bugs": { - "url": 
"https://github.com/Azure/azure-sdk-for-js/issues" - }, - "engines": { - "node": ">=20.0.0" - }, - "homepage": "https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/search/search-documents", - "sideEffects": false, "dependencies": { + "@azure/core-util": "^1.12.0", + "@azure-rest/core-client": "^2.3.1", "@azure/core-auth": "^1.9.0", - "@azure/core-client": "^1.9.2", - "@azure/core-http-compat": "^2.1.2", - "@azure/core-paging": "^1.6.2", - "@azure/core-rest-pipeline": "^1.18.0", - "@azure/core-tracing": "^1.2.0", - "@azure/core-util": "^1.11.0", - "@azure/logger": "^1.1.4", - "events": "^3.0.0", + "@azure/core-rest-pipeline": "^1.20.0", + "@azure/core-tracing": "^1.0.0", + "@azure/logger": "^1.2.0", "tslib": "^2.8.1" }, "devDependencies": { @@ -86,50 +95,36 @@ "@azure-tools/test-recorder": "workspace:^", "@azure-tools/test-utils-vitest": "workspace:^", "@azure/dev-tool": "workspace:^", + "tshy": "catalog:", "@azure/eslint-plugin-azure-sdk": "workspace:^", "@azure/identity": "catalog:internal", - "@azure/openai": "1.0.0-beta.12", "@types/node": "catalog:", - "@vitest/browser-playwright": "catalog:testing", - "@vitest/coverage-istanbul": "catalog:testing", "cross-env": "catalog:", - "dotenv": "catalog:testing", "eslint": "catalog:", - "playwright": "catalog:testing", "prettier": "catalog:", "rimraf": "catalog:", - "tshy": "catalog:", - "type-plus": "^7.6.2", "typescript": "catalog:", + "@vitest/coverage-istanbul": "catalog:testing", + "playwright": "catalog:testing", "vitest": "catalog:testing" }, - "//sampleConfiguration": { - "productName": "Azure Search Documents", - "productSlugs": [ - "azure", - "azure-cognitive-search", - "azure-search" - ], - "requiredResources": { - "Azure Search Documents instance": "https://learn.microsoft.com/azure/search/search-create-service-portal" - } - }, - "type": "module", - "tshy": { - "exports": { - "./package.json": "./package.json", - ".": "./src/index.ts" - }, - "dialects": [ - "esm", - "commonjs" - ], - "esmDialects": [ - "browser", - "react-native" - ], - "selfLink": false, - "project": "../../../tsconfig.src.build.json" + "scripts": { + "clean": "rimraf --glob dist dist-browser dist-esm test-dist temp types *.tgz *.log", + "extract-api": "rimraf review && dev-tool run extract-api", + "pack": "pnpm pack 2>&1", + "lint": "eslint package.json src test", + "lint:fix": "eslint package.json src test --fix --fix-type [problem,suggestion]", + "build:samples": "echo skipped", + "check-format": "prettier --list-different --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.{ts,cts,mts}\" \"test/**/*.{ts,cts,mts}\" \"*.{js,cjs,mjs,json}\" ", + "execute:samples": "echo skipped", + "format": "prettier --write --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.{ts,cts,mts}\" \"generated/**/*.{ts,cts,mts}\" \"test/**/*.{ts,cts,mts}\" \"*.{js,cjs,mjs,json}\" ", + "generate:client": "tsp-client update && npm run format && npx dev-tool customization apply-v2 --skip index.ts", + "test:browser": "dev-tool run build-test && dev-tool run test:vitest --browser", + "build": "npm run clean && dev-tool run build-package && dev-tool run extract-api", + "test:node": "dev-tool run test:vitest", + "test:node:esm": "dev-tool run test:vitest --esm", + "test": "npm run test:node && npm run test:browser", + "update-snippets": "dev-tool run update-snippets" }, "exports": { "./package.json": "./package.json", @@ -150,6 +145,225 @@ "types": "./dist/commonjs/index.d.ts", "default": "./dist/commonjs/index.js" } + }, + 
"./search": { + "browser": { + "types": "./dist/browser/search/index.d.ts", + "default": "./dist/browser/search/index.js" + }, + "react-native": { + "types": "./dist/react-native/search/index.d.ts", + "default": "./dist/react-native/search/index.js" + }, + "import": { + "types": "./dist/esm/search/index.d.ts", + "default": "./dist/esm/search/index.js" + }, + "require": { + "types": "./dist/commonjs/search/index.d.ts", + "default": "./dist/commonjs/search/index.js" + } + }, + "./search/api": { + "browser": { + "types": "./dist/browser/search/api/index.d.ts", + "default": "./dist/browser/search/api/index.js" + }, + "react-native": { + "types": "./dist/react-native/search/api/index.d.ts", + "default": "./dist/react-native/search/api/index.js" + }, + "import": { + "types": "./dist/esm/search/api/index.d.ts", + "default": "./dist/esm/search/api/index.js" + }, + "require": { + "types": "./dist/commonjs/search/api/index.d.ts", + "default": "./dist/commonjs/search/api/index.js" + } + }, + "./searchIndex": { + "browser": { + "types": "./dist/browser/searchIndex/index.d.ts", + "default": "./dist/browser/searchIndex/index.js" + }, + "react-native": { + "types": "./dist/react-native/searchIndex/index.d.ts", + "default": "./dist/react-native/searchIndex/index.js" + }, + "import": { + "types": "./dist/esm/searchIndex/index.d.ts", + "default": "./dist/esm/searchIndex/index.js" + }, + "require": { + "types": "./dist/commonjs/searchIndex/index.d.ts", + "default": "./dist/commonjs/searchIndex/index.js" + } + }, + "./searchIndex/api": { + "browser": { + "types": "./dist/browser/searchIndex/api/index.d.ts", + "default": "./dist/browser/searchIndex/api/index.js" + }, + "react-native": { + "types": "./dist/react-native/searchIndex/api/index.d.ts", + "default": "./dist/react-native/searchIndex/api/index.js" + }, + "import": { + "types": "./dist/esm/searchIndex/api/index.d.ts", + "default": "./dist/esm/searchIndex/api/index.js" + }, + "require": { + "types": "./dist/commonjs/searchIndex/api/index.d.ts", + "default": "./dist/commonjs/searchIndex/api/index.js" + } + }, + "./searchIndexer": { + "browser": { + "types": "./dist/browser/searchIndexer/index.d.ts", + "default": "./dist/browser/searchIndexer/index.js" + }, + "react-native": { + "types": "./dist/react-native/searchIndexer/index.d.ts", + "default": "./dist/react-native/searchIndexer/index.js" + }, + "import": { + "types": "./dist/esm/searchIndexer/index.d.ts", + "default": "./dist/esm/searchIndexer/index.js" + }, + "require": { + "types": "./dist/commonjs/searchIndexer/index.d.ts", + "default": "./dist/commonjs/searchIndexer/index.js" + } + }, + "./searchIndexer/api": { + "browser": { + "types": "./dist/browser/searchIndexer/api/index.d.ts", + "default": "./dist/browser/searchIndexer/api/index.js" + }, + "react-native": { + "types": "./dist/react-native/searchIndexer/api/index.d.ts", + "default": "./dist/react-native/searchIndexer/api/index.js" + }, + "import": { + "types": "./dist/esm/searchIndexer/api/index.d.ts", + "default": "./dist/esm/searchIndexer/api/index.js" + }, + "require": { + "types": "./dist/commonjs/searchIndexer/api/index.d.ts", + "default": "./dist/commonjs/searchIndexer/api/index.js" + } + }, + "./knowledgeBaseRetrieval": { + "browser": { + "types": "./dist/browser/knowledgeBaseRetrieval/index.d.ts", + "default": "./dist/browser/knowledgeBaseRetrieval/index.js" + }, + "react-native": { + "types": "./dist/react-native/knowledgeBaseRetrieval/index.d.ts", + "default": "./dist/react-native/knowledgeBaseRetrieval/index.js" + }, + "import": { + 
"types": "./dist/esm/knowledgeBaseRetrieval/index.d.ts", + "default": "./dist/esm/knowledgeBaseRetrieval/index.js" + }, + "require": { + "types": "./dist/commonjs/knowledgeBaseRetrieval/index.d.ts", + "default": "./dist/commonjs/knowledgeBaseRetrieval/index.js" + } + }, + "./knowledgeBaseRetrieval/api": { + "browser": { + "types": "./dist/browser/knowledgeBaseRetrieval/api/index.d.ts", + "default": "./dist/browser/knowledgeBaseRetrieval/api/index.js" + }, + "react-native": { + "types": "./dist/react-native/knowledgeBaseRetrieval/api/index.d.ts", + "default": "./dist/react-native/knowledgeBaseRetrieval/api/index.js" + }, + "import": { + "types": "./dist/esm/knowledgeBaseRetrieval/api/index.d.ts", + "default": "./dist/esm/knowledgeBaseRetrieval/api/index.js" + }, + "require": { + "types": "./dist/commonjs/knowledgeBaseRetrieval/api/index.d.ts", + "default": "./dist/commonjs/knowledgeBaseRetrieval/api/index.js" + } + }, + "./models": { + "browser": { + "types": "./dist/browser/models/index.d.ts", + "default": "./dist/browser/models/index.js" + }, + "react-native": { + "types": "./dist/react-native/models/index.d.ts", + "default": "./dist/react-native/models/index.js" + }, + "import": { + "types": "./dist/esm/models/index.d.ts", + "default": "./dist/esm/models/index.js" + }, + "require": { + "types": "./dist/commonjs/models/index.d.ts", + "default": "./dist/commonjs/models/index.js" + } + }, + "./models/azure/search/documents": { + "browser": { + "types": "./dist/browser/models/azure/search/documents/index.d.ts", + "default": "./dist/browser/models/azure/search/documents/index.js" + }, + "react-native": { + "types": "./dist/react-native/models/azure/search/documents/index.d.ts", + "default": "./dist/react-native/models/azure/search/documents/index.js" + }, + "import": { + "types": "./dist/esm/models/azure/search/documents/index.d.ts", + "default": "./dist/esm/models/azure/search/documents/index.js" + }, + "require": { + "types": "./dist/commonjs/models/azure/search/documents/index.d.ts", + "default": "./dist/commonjs/models/azure/search/documents/index.js" + } + }, + "./models/azure/search/documents/indexes": { + "browser": { + "types": "./dist/browser/models/azure/search/documents/indexes/index.d.ts", + "default": "./dist/browser/models/azure/search/documents/indexes/index.js" + }, + "react-native": { + "types": "./dist/react-native/models/azure/search/documents/indexes/index.d.ts", + "default": "./dist/react-native/models/azure/search/documents/indexes/index.js" + }, + "import": { + "types": "./dist/esm/models/azure/search/documents/indexes/index.d.ts", + "default": "./dist/esm/models/azure/search/documents/indexes/index.js" + }, + "require": { + "types": "./dist/commonjs/models/azure/search/documents/indexes/index.d.ts", + "default": "./dist/commonjs/models/azure/search/documents/indexes/index.js" + } + }, + "./models/azure/search/documents/knowledgeBase": { + "browser": { + "types": "./dist/browser/models/azure/search/documents/knowledgeBase/index.d.ts", + "default": "./dist/browser/models/azure/search/documents/knowledgeBase/index.js" + }, + "react-native": { + "types": "./dist/react-native/models/azure/search/documents/knowledgeBase/index.d.ts", + "default": "./dist/react-native/models/azure/search/documents/knowledgeBase/index.js" + }, + "import": { + "types": "./dist/esm/models/azure/search/documents/knowledgeBase/index.d.ts", + "default": "./dist/esm/models/azure/search/documents/knowledgeBase/index.js" + }, + "require": { + "types": 
"./dist/commonjs/models/azure/search/documents/knowledgeBase/index.d.ts", + "default": "./dist/commonjs/models/azure/search/documents/knowledgeBase/index.js" + } } - } + }, + "main": "./dist/commonjs/index.js", + "types": "./dist/commonjs/index.d.ts", + "module": "./dist/esm/index.js" } diff --git a/sdk/search/search-documents/review/search-documents-browser.api.diff.md b/sdk/search/search-documents/review/search-documents-browser.api.diff.md index 441f5b2f0090..2c435ea26e77 100644 --- a/sdk/search/search-documents/review/search-documents-browser.api.diff.md +++ b/sdk/search/search-documents/review/search-documents-browser.api.diff.md @@ -7,7 +7,7 @@ For the complete API surface, see the corresponding -node.api.md file. =================================================================== --- NodeJS +++ browser -@@ -649,9 +649,9 @@ +@@ -592,9 +592,9 @@ // @public export type CreateSkillsetOptions = OperationOptions; diff --git a/sdk/search/search-documents/review/search-documents-knowledgeBaseRetrieval-api-node.api.md b/sdk/search/search-documents/review/search-documents-knowledgeBaseRetrieval-api-node.api.md new file mode 100644 index 000000000000..bdd293dcbe03 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-knowledgeBaseRetrieval-api-node.api.md @@ -0,0 +1,37 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). + +```ts + +import { Client } from '@azure-rest/core-client'; +import { ClientOptions } from '@azure-rest/core-client'; +import { KeyCredential } from '@azure/core-auth'; +import { OperationOptions } from '@azure-rest/core-client'; +import { TokenCredential } from '@azure/core-auth'; + +// @public (undocumented) +export function createKnowledgeBaseRetrieval(endpointParam: string, credential: KeyCredential | TokenCredential, options?: KnowledgeBaseRetrievalClientOptionalParams): KnowledgeBaseRetrievalContext; + +// @public +export interface KnowledgeBaseRetrievalClientOptionalParams extends ClientOptions { + apiVersion?: string; +} + +// @public (undocumented) +export interface KnowledgeBaseRetrievalContext extends Client { + apiVersion: string; +} + +// @public +export function retrieve(context: KnowledgeBaseRetrievalContext, knowledgeBaseName: string, retrievalRequest: KnowledgeBaseRetrievalRequest, options?: RetrieveOptionalParams): Promise; + +// @public +export interface RetrieveOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-knowledgeBaseRetrieval-node.api.md b/sdk/search/search-documents/review/search-documents-knowledgeBaseRetrieval-node.api.md new file mode 100644 index 000000000000..67b8c55fcec3 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-knowledgeBaseRetrieval-node.api.md @@ -0,0 +1,39 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). 
+
+```ts
+
+import { Client } from '@azure-rest/core-client';
+import { ClientOptions } from '@azure-rest/core-client';
+import { KeyCredential } from '@azure/core-auth';
+import { OperationOptions } from '@azure-rest/core-client';
+import { Pipeline } from '@azure/core-rest-pipeline';
+import { TokenCredential } from '@azure/core-auth';
+
+// @public (undocumented)
+export class KnowledgeBaseRetrievalClient {
+    constructor(endpointParam: string, credential: KeyCredential | TokenCredential, options?: KnowledgeBaseRetrievalClientOptionalParams);
+    readonly pipeline: Pipeline;
+    retrieve(knowledgeBaseName: string, retrievalRequest: KnowledgeBaseRetrievalRequest, options?: RetrieveOptionalParams): Promise;
+}
+
+// @public
+export interface KnowledgeBaseRetrievalClientOptionalParams extends ClientOptions {
+    apiVersion?: string;
+}
+
+// @public (undocumented)
+export interface KnowledgeBaseRetrievalContext extends Client {
+    apiVersion: string;
+}
+
+// @public
+export interface RetrieveOptionalParams extends OperationOptions {
+    clientRequestId?: string;
+    querySourceAuthorization?: string;
+}
+
+// (No @packageDocumentation comment for this package)
+
+```
diff --git a/sdk/search/search-documents/review/search-documents-models-azure-search-documents-indexes-node.api.md b/sdk/search/search-documents/review/search-documents-models-azure-search-documents-indexes-node.api.md
new file mode 100644
index 000000000000..dc425d746998
--- /dev/null
+++ b/sdk/search/search-documents/review/search-documents-models-azure-search-documents-indexes-node.api.md
@@ -0,0 +1,3011 @@
+## API Report File for "@azure/search-documents"
+
+> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/).
+
+```ts
+
+// @public
+export type AIFoundryModelCatalogName = string;
+
+// @public
+export interface AIServicesAccountIdentity extends CognitiveServicesAccount {
+    identity?: SearchIndexerDataIdentityUnion;
+    odatatype: "#Microsoft.Azure.Search.AIServicesByIdentity";
+    subdomainUrl: string;
+}
+
+// @public
+export interface AIServicesAccountKey extends CognitiveServicesAccount {
+    key: string;
+    odatatype: "#Microsoft.Azure.Search.AIServicesByKey";
+    subdomainUrl: string;
+}
+
+// @public
+export interface AIServicesVisionParameters {
+    apiKey?: string;
+    authIdentity?: SearchIndexerDataIdentityUnion;
+    modelVersion: string;
+    resourceUri: string;
+}
+
+// @public
+export interface AIServicesVisionVectorizer extends VectorSearchVectorizer {
+    aiServicesVisionParameters?: AIServicesVisionParameters;
+    kind: "aiServicesVision";
+}
+
+// @public
+export interface AnalyzedTokenInfo {
+    endOffset: number;
+    position: number;
+    startOffset: number;
+    token: string;
+}
+
+// @public
+export interface AnalyzeResult {
+    tokens: AnalyzedTokenInfo[];
+}
+
+// @public
+export interface AnalyzeTextOptions {
+    analyzerName?: LexicalAnalyzerName;
+    charFilters?: CharFilterName[];
+    normalizerName?: LexicalNormalizerName;
+    text: string;
+    tokenFilters?: TokenFilterName[];
+    tokenizerName?: LexicalTokenizerName;
+}
+
+// @public
+export interface AsciiFoldingTokenFilter extends TokenFilter {
+    odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter";
+    preserveOriginal?: boolean;
+}
+
+// @public
+export interface AzureActiveDirectoryApplicationCredentials {
+    applicationId: string;
+    applicationSecret?: string;
+}
+
+// @public
+export interface AzureBlobKnowledgeSource extends KnowledgeSource {
+    azureBlobParameters: AzureBlobKnowledgeSourceParameters;
+    // (undocumented)
"azureBlob"; +} + +// @public +export interface AzureBlobKnowledgeSourceParameters { + chatCompletionModel?: KnowledgeBaseModelUnion; + connectionString: string; + containerName: string; + readonly createdResources?: CreatedResources; + disableImageVerbalization?: boolean; + embeddingModel?: VectorSearchVectorizerUnion; + folderPath?: string; + identity?: SearchIndexerDataIdentityUnion; + ingestionSchedule?: IndexingSchedule; +} + +// @public +export interface AzureMachineLearningParameters { + authenticationKey?: string; + modelName?: AIFoundryModelCatalogName; + region?: string; + resourceId?: string; + scoringUri: string; + timeout?: string; +} + +// @public +export interface AzureMachineLearningSkill extends SearchIndexerSkill { + authenticationKey?: string; + degreeOfParallelism?: number; + odatatype: "#Microsoft.Skills.Custom.AmlSkill"; + region?: string; + resourceId?: string; + scoringUri?: string; + timeout?: string; +} + +// @public +export interface AzureMachineLearningVectorizer extends VectorSearchVectorizer { + amlParameters?: AzureMachineLearningParameters; + kind: "aml"; +} + +// @public +export interface AzureOpenAIEmbeddingSkill extends SearchIndexerSkill { + apiKey?: string; + authIdentity?: SearchIndexerDataIdentityUnion; + deploymentName?: string; + dimensions?: number; + modelName?: AzureOpenAIModelName; + odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"; + resourceUrl?: string; +} + +// @public +export type AzureOpenAIModelName = string; + +// @public +export interface AzureOpenAiParameters { + apiKey?: string; + authenticationMethod?: string; + authIdentity?: string; + deploymentId: string; + modelName?: AzureOpenAIModelName; + resourceUri: string; +} + +// @public +export interface AzureOpenAITokenizerParameters { + allowedSpecialTokens?: string[]; + encoderModelName?: SplitSkillEncoderModelName; +} + +// @public +export interface AzureOpenAIVectorizer extends VectorSearchVectorizer { + kind: "azureOpenAI"; + parameters?: AzureOpenAIVectorizerParameters; +} + +// @public +export interface AzureOpenAIVectorizerParameters { + apiKey?: string; + authIdentity?: SearchIndexerDataIdentityUnion; + deploymentName?: string; + modelName?: AzureOpenAIModelName; + resourceUrl?: string; +} + +// @public +export interface BinaryQuantizationCompression extends VectorSearchCompression { + kind: "binaryQuantization"; +} + +// @public +export type BlobIndexerDataToExtract = string; + +// @public +export type BlobIndexerImageAction = string; + +// @public +export type BlobIndexerParsingMode = string; + +// @public +export type BlobIndexerPDFTextRotationAlgorithm = string; + +// @public +export interface BM25Similarity extends SimilarityAlgorithm { + b?: number; + k1?: number; + odatatype: "#Microsoft.Azure.Search.BM25Similarity"; +} + +// @public +export interface CharFilter { + name: string; + odatatype: string; +} + +// @public +export type CharFilterName = string; + +// @public +export type CharFilterUnion = MappingCharFilter | PatternReplaceCharFilter | CharFilter; + +// @public +export type ChatCompletionExtraParametersBehavior = string; + +// @public +export interface ChatCompletionResponseFormat { + jsonSchemaProperties?: ChatCompletionSchemaProperties; + type?: ChatCompletionResponseFormatType; +} + +// @public +export type ChatCompletionResponseFormatType = string; + +// @public +export interface ChatCompletionSchema { + additionalProperties?: boolean; + properties?: string; + required?: string[]; + type?: string; +} + +// @public +export interface 
+export interface ChatCompletionSchemaProperties {
+    description?: string;
+    name?: string;
+    schema?: ChatCompletionSchema;
+    strict?: boolean;
+}
+
+// @public
+export interface ChatCompletionSkill extends SearchIndexerSkill {
+    apiKey?: string;
+    authIdentity?: SearchIndexerDataIdentityUnion;
+    authResourceId?: string;
+    batchSize?: number;
+    commonModelParameters?: CommonModelParameters;
+    degreeOfParallelism?: number;
+    extraParameters?: Record;
+    extraParametersBehavior?: ChatCompletionExtraParametersBehavior;
+    httpHeaders?: WebApiHttpHeaders;
+    httpMethod?: string;
+    odataType: "#Microsoft.Skills.Custom.ChatCompletionSkill";
+    responseFormat?: ChatCompletionResponseFormat;
+    timeout?: string;
+    uri: string;
+}
+
+// @public
+export interface CjkBigramTokenFilter extends TokenFilter {
+    ignoreScripts?: CjkBigramTokenFilterScripts[];
+    odatatype: "#Microsoft.Azure.Search.CjkBigramTokenFilter";
+    outputUnigrams?: boolean;
+}
+
+// @public
+export type CjkBigramTokenFilterScripts = string;
+
+// @public
+export interface ClassicSimilarity extends SimilarityAlgorithm {
+    odatatype: "#Microsoft.Azure.Search.ClassicSimilarity";
+}
+
+// @public
+export interface ClassicTokenizer extends LexicalTokenizer {
+    maxTokenLength?: number;
+    odatatype: "#Microsoft.Azure.Search.ClassicTokenizer";
+}
+
+// @public
+export interface CognitiveServicesAccount {
+    description?: string;
+    odatatype: string;
+}
+
+// @public
+export interface CognitiveServicesAccountKey extends CognitiveServicesAccount {
+    key: string;
+    odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey";
+}
+
+// @public
+export type CognitiveServicesAccountUnion = DefaultCognitiveServicesAccount | CognitiveServicesAccountKey | AIServicesAccountKey | AIServicesAccountIdentity | CognitiveServicesAccount;
+
+// @public
+export interface CommonGramTokenFilter extends TokenFilter {
+    commonWords: string[];
+    ignoreCase?: boolean;
+    odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter";
+    useQueryMode?: boolean;
+}
+
+// @public
+export interface CommonModelParameters {
+    frequencyPenalty?: number;
+    maxTokens?: number;
+    modelName?: string;
+    presencePenalty?: number;
+    seed?: number;
+    stop?: string[];
+    temperature?: number;
+}
+
+// @public
+export interface ConditionalSkill extends SearchIndexerSkill {
+    odatatype: "#Microsoft.Skills.Util.ConditionalSkill";
+}
+
+// @public
+export interface ContentUnderstandingSkill extends SearchIndexerSkill {
+    chunkingProperties?: ContentUnderstandingSkillChunkingProperties;
+    extractionOptions?: ContentUnderstandingSkillExtractionOptions[];
+    odataType: "#Microsoft.Skills.Util.ContentUnderstandingSkill";
+}
+
+// @public
+export interface ContentUnderstandingSkillChunkingProperties {
+    maximumLength?: number;
+    overlapLength?: number;
+    unit?: ContentUnderstandingSkillChunkingUnit;
+}
+
+// @public
+export type ContentUnderstandingSkillChunkingUnit = string;
+
+// @public
+export type ContentUnderstandingSkillExtractionOptions = string;
+
+// @public
+export interface CorsOptions {
+    allowedOrigins: string[];
+    maxAgeInSeconds?: number;
+}
+
+// @public
+export interface CreatedResources {
+    additionalProperties?: Record;
+}
+
+// @public
+export interface CustomAnalyzer extends LexicalAnalyzer {
+    charFilters?: CharFilterName[];
+    odatatype: "#Microsoft.Azure.Search.CustomAnalyzer";
+    tokenFilters?: TokenFilterName[];
+    tokenizer: LexicalTokenizerName;
+}
+
+// @public
+export interface CustomEntity {
+    accentSensitive?: boolean;
+    aliases?: CustomEntityAlias[];
+    caseSensitive?: boolean;
+    defaultAccentSensitive?: boolean;
+    defaultCaseSensitive?: boolean;
+    defaultFuzzyEditDistance?: number;
+    description?: string;
+    fuzzyEditDistance?: number;
+    id?: string;
+    name: string;
+    subtype?: string;
+    type?: string;
+}
+
+// @public
+export interface CustomEntityAlias {
+    accentSensitive?: boolean;
+    caseSensitive?: boolean;
+    fuzzyEditDistance?: number;
+    text: string;
+}
+
+// @public
+export interface CustomEntityLookupSkill extends SearchIndexerSkill {
+    defaultLanguageCode?: CustomEntityLookupSkillLanguage;
+    entitiesDefinitionUri?: string;
+    globalDefaultAccentSensitive?: boolean;
+    globalDefaultCaseSensitive?: boolean;
+    globalDefaultFuzzyEditDistance?: number;
+    inlineEntitiesDefinition?: CustomEntity[];
+    odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill";
+}
+
+// @public
+export type CustomEntityLookupSkillLanguage = string;
+
+// @public
+export interface CustomNormalizer extends LexicalNormalizer {
+    charFilters?: CharFilterName[];
+    odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
+    tokenFilters?: TokenFilterName[];
+}
+
+// @public
+export interface DataChangeDetectionPolicy {
+    odatatype: string;
+}
+
+// @public
+export type DataChangeDetectionPolicyUnion = HighWaterMarkChangeDetectionPolicy | SqlIntegratedChangeTrackingPolicy | DataChangeDetectionPolicy;
+
+// @public
+export interface DataDeletionDetectionPolicy {
+    odatatype: string;
+}
+
+// @public
+export type DataDeletionDetectionPolicyUnion = SoftDeleteColumnDeletionDetectionPolicy | NativeBlobSoftDeleteDeletionDetectionPolicy | DataDeletionDetectionPolicy;
+
+// @public
+export interface DataSourceCredentials {
+    connectionString?: string;
+}
+
+// @public
+export interface DefaultCognitiveServicesAccount extends CognitiveServicesAccount {
+    odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices";
+}
+
+// @public
+export interface DictionaryDecompounderTokenFilter extends TokenFilter {
+    maxSubwordSize?: number;
+    minSubwordSize?: number;
+    minWordSize?: number;
+    odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter";
+    onlyLongestMatch?: boolean;
+    wordList: string[];
+}
+
+// @public
+export interface DistanceScoringFunction extends ScoringFunction {
+    parameters: DistanceScoringParameters;
+    type: "distance";
+}
+
+// @public
+export interface DistanceScoringParameters {
+    boostingDistance: number;
+    referencePointParameter: string;
+}
+
+// @public
+export interface DocumentExtractionSkill extends SearchIndexerSkill {
+    configuration?: Record;
+    dataToExtract?: string;
+    odatatype: "#Microsoft.Skills.Util.DocumentExtractionSkill";
+    parsingMode?: string;
+}
+
+// @public
+export interface DocumentIntelligenceLayoutSkill extends SearchIndexerSkill {
+    chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties;
+    extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[];
+    markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth;
+    odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill";
+    outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat;
+    outputMode?: DocumentIntelligenceLayoutSkillOutputMode;
+}
+
+// @public
+export interface DocumentIntelligenceLayoutSkillChunkingProperties {
+    maximumLength?: number;
+    overlapLength?: number;
+    unit?: DocumentIntelligenceLayoutSkillChunkingUnit;
+}
+
+// @public
+export type DocumentIntelligenceLayoutSkillChunkingUnit = string;
+
+// @public
+export type DocumentIntelligenceLayoutSkillExtractionOptions = string;
+
+// @public
+export type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string;
+
+// @public
+export type DocumentIntelligenceLayoutSkillOutputFormat = string;
+
+// @public
+export type DocumentIntelligenceLayoutSkillOutputMode = string;
+
+// @public
+export interface DocumentKeysOrIds {
+    datasourceDocumentIds?: string[];
+    documentKeys?: string[];
+}
+
+// @public
+export interface EdgeNGramTokenFilter extends TokenFilter {
+    maxGram?: number;
+    minGram?: number;
+    odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2";
+    side?: EdgeNGramTokenFilterSide;
+}
+
+// @public
+export type EdgeNGramTokenFilterSide = string;
+
+// @public
+export interface EdgeNGramTokenizer extends LexicalTokenizer {
+    maxGram?: number;
+    minGram?: number;
+    odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer";
+    tokenChars?: TokenCharacterKind[];
+}
+
+// @public
+export interface ElisionTokenFilter extends TokenFilter {
+    articles?: string[];
+    odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter";
+}
+
+// @public
+export type EntityCategory = string;
+
+// @public
+export interface EntityLinkingSkill extends SearchIndexerSkill {
+    defaultLanguageCode?: string;
+    minimumPrecision?: number;
+    modelVersion?: string;
+    odatatype: "#Microsoft.Skills.Text.V3.EntityLinkingSkill";
+}
+
+// @public
+export interface EntityRecognitionSkill extends SearchIndexerSkill {
+    categories?: EntityCategory[];
+    defaultLanguageCode?: EntityRecognitionSkillLanguage;
+    includeTypelessEntities?: boolean;
+    minimumPrecision?: number;
+    odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill";
+}
+
+// @public
+export type EntityRecognitionSkillLanguage = string;
+
+// @public
+export interface EntityRecognitionSkillV3 extends SearchIndexerSkill {
+    categories?: string[];
+    defaultLanguageCode?: string;
+    minimumPrecision?: number;
+    modelVersion?: string;
+    odatatype: "#Microsoft.Skills.Text.V3.EntityRecognitionSkill";
+}
+
+// @public
+export interface ExhaustiveKnnAlgorithmConfiguration extends VectorSearchAlgorithmConfiguration {
+    kind: "exhaustiveKnn";
+    parameters?: ExhaustiveKnnParameters;
+}
+
+// @public
+export interface ExhaustiveKnnParameters {
+    metric?: VectorSearchAlgorithmMetric;
+}
+
+// @public
+export interface FieldMapping {
+    mappingFunction?: FieldMappingFunction;
+    sourceFieldName: string;
+    targetFieldName?: string;
+}
+
+// @public
+export interface FieldMappingFunction {
+    name: string;
+    parameters?: Record;
+}
+
+// @public
+export interface FreshnessScoringFunction extends ScoringFunction {
+    parameters: FreshnessScoringParameters;
+    type: "freshness";
+}
+
+// @public
+export interface FreshnessScoringParameters {
+    boostingDuration: string;
+}
+
+// @public
+export interface GetIndexStatisticsResult {
+    documentCount: number;
+    storageSize: number;
+    vectorIndexSize: number;
+}
+
+// @public
+export interface HighWaterMarkChangeDetectionPolicy extends DataChangeDetectionPolicy {
+    highWaterMarkColumnName: string;
+    odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy";
+}
+
+// @public
+export interface HnswAlgorithmConfiguration extends VectorSearchAlgorithmConfiguration {
+    kind: "hnsw";
+    parameters?: HnswParameters;
+}
+
+// @public
+export interface HnswParameters {
+    efConstruction?: number;
+    efSearch?: number;
+    m?: number;
+    metric?: VectorSearchAlgorithmMetric;
+}
+
+// @public
+export interface ImageAnalysisSkill extends SearchIndexerSkill {
+    defaultLanguageCode?: ImageAnalysisSkillLanguage;
+    details?: ImageDetail[];
"#Microsoft.Skills.Vision.ImageAnalysisSkill"; + visualFeatures?: VisualFeature[]; +} + +// @public +export type ImageAnalysisSkillLanguage = string; + +// @public +export type ImageDetail = string; + +// @public +export interface IndexerCurrentState { + readonly allDocsFinalTrackingState?: string; + readonly allDocsInitialTrackingState?: string; + readonly mode?: IndexingMode; + readonly resetDatasourceDocumentIds?: string[]; + readonly resetDocsFinalTrackingState?: string; + readonly resetDocsInitialTrackingState?: string; + readonly resetDocumentKeys?: string[]; + readonly resyncFinalTrackingState?: string; + readonly resyncInitialTrackingState?: string; +} + +// @public +export type IndexerExecutionEnvironment = string; + +// @public +export interface IndexerExecutionResult { + readonly currentState?: IndexerCurrentState; + endTime?: Date; + errorMessage?: string; + errors: SearchIndexerError[]; + failedItemCount: number; + finalTrackingState?: string; + initialTrackingState?: string; + itemCount: number; + readonly mode?: IndexingMode; + startTime?: Date; + status: IndexerExecutionStatus; + readonly statusDetail?: IndexerExecutionStatusDetail; + warnings: SearchIndexerWarning[]; +} + +// @public +export type IndexerExecutionStatus = string; + +// @public +export type IndexerExecutionStatusDetail = string; + +// @public +export type IndexerPermissionOption = string; + +// @public +export interface IndexerResyncBody { + options?: IndexerResyncOption[]; +} + +// @public +export type IndexerResyncOption = string; + +// @public +export type IndexerStatus = string; + +// @public +export type IndexingMode = string; + +// @public +export interface IndexingParameters { + batchSize?: number; + configuration?: IndexingParametersConfiguration; + maxFailedItems?: number; + maxFailedItemsPerBatch?: number; +} + +// @public +export interface IndexingParametersConfiguration { + additionalProperties?: Record; + allowSkillsetToReadFileData?: boolean; + dataToExtract?: BlobIndexerDataToExtract; + delimitedTextDelimiter?: string; + delimitedTextHeaders?: string; + documentRoot?: string; + excludedFileNameExtensions?: string; + executionEnvironment?: IndexerExecutionEnvironment; + failOnUnprocessableDocument?: boolean; + failOnUnsupportedContentType?: boolean; + firstLineContainsHeaders?: boolean; + imageAction?: BlobIndexerImageAction; + indexedFileNameExtensions?: string; + indexStorageMetadataOnlyForOversizedDocuments?: boolean; + markdownHeaderDepth?: MarkdownHeaderDepth; + markdownParsingSubmode?: MarkdownParsingSubmode; + parsingMode?: BlobIndexerParsingMode; + pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm; + queryTimeout?: string; +} + +// @public +export interface IndexingSchedule { + interval: string; + startTime?: Date; +} + +// @public +export type IndexProjectionMode = string; + +// @public +export interface IndexStatisticsSummary { + readonly documentCount: number; + readonly name: string; + readonly storageSize: number; + readonly vectorIndexSize?: number; +} + +// @public +export interface InputFieldMappingEntry { + inputs?: InputFieldMappingEntry[]; + name: string; + source?: string; + sourceContext?: string; +} + +// @public +export interface KeepTokenFilter extends TokenFilter { + keepWords: string[]; + lowerCaseKeepWords?: boolean; + odatatype: "#Microsoft.Azure.Search.KeepTokenFilter"; +} + +// @public +export interface KeyPhraseExtractionSkill extends SearchIndexerSkill { + defaultLanguageCode?: KeyPhraseExtractionSkillLanguage; + maxKeyPhraseCount?: number; + 
+    modelVersion?: string;
+    odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill";
+}
+
+// @public
+export type KeyPhraseExtractionSkillLanguage = string;
+
+// @public
+export interface KeywordMarkerTokenFilter extends TokenFilter {
+    ignoreCase?: boolean;
+    keywords: string[];
+    odatatype: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter";
+}
+
+// @public
+export interface KeywordTokenizer extends LexicalTokenizer {
+    maxTokenLength?: number;
+    odatatype: "#Microsoft.Azure.Search.KeywordTokenizerV2";
+}
+
+// @public
+export interface KnowledgeBase {
+    answerInstructions?: string;
+    description?: string;
+    encryptionKey?: SearchResourceEncryptionKey;
+    eTag?: string;
+    knowledgeSources: KnowledgeSourceReference[];
+    models?: KnowledgeBaseModelUnion[];
+    readonly name: string;
+    outputMode?: KnowledgeRetrievalOutputMode;
+    retrievalInstructions?: string;
+    retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion;
+}
+
+// @public
+export interface KnowledgeBaseAzureOpenAIModel extends KnowledgeBaseModel {
+    azureOpenAIParameters: AzureOpenAiParameters;
+    // (undocumented)
+    kind: "azureOpenAI";
+}
+
+// @public
+export interface KnowledgeBaseModel {
+    kind: KnowledgeBaseModelKind;
+}
+
+// @public
+export type KnowledgeBaseModelKind = string;
+
+// @public
+export type KnowledgeBaseModelUnion = KnowledgeBaseAzureOpenAIModel | KnowledgeBaseModel;
+
+// @public
+export interface KnowledgeSource {
+    description?: string;
+    encryptionKey?: SearchResourceEncryptionKey;
+    eTag?: string;
+    kind: KnowledgeSourceKind;
+    readonly name: string;
+}
+
+// @public
+export type KnowledgeSourceKind = string;
+
+// @public
+export interface KnowledgeSourceReference {
+    name: string;
+}
+
+// @public
+export type KnowledgeSourceUnion = SearchIndexKnowledgeSource | AzureBlobKnowledgeSource | IndexedSharePointKnowledgeSource | IndexedOneLakeKnowledgeSource | WebKnowledgeSource | RemoteSharePointKnowledgeSource | KnowledgeSource;
+
+// @public
+export enum KnownAIFoundryModelCatalogName {
+    CohereEmbedV3English = "Cohere-embed-v3-english",
+    CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual",
+    FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base",
+    FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant",
+    OpenAiclipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32",
+    OpenAiclipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336"
+}
+
+// @public
+export enum KnownAzureOpenAIModelName {
+    TextEmbedding3Large = "text-embedding-3-large",
+    TextEmbedding3Small = "text-embedding-3-small",
+    TextEmbeddingAda002 = "text-embedding-ada-002"
+}
+
+// @public
+export enum KnownBlobIndexerDataToExtract {
+    AllMetadata = "allMetadata",
+    ContentAndMetadata = "contentAndMetadata",
+    StorageMetadata = "storageMetadata"
+}
+
+// @public
+export enum KnownBlobIndexerImageAction {
+    GenerateNormalizedImagePerPage = "generateNormalizedImagePerPage",
+    GenerateNormalizedImages = "generateNormalizedImages",
+    None = "none"
+}
+
+// @public
+export enum KnownBlobIndexerParsingMode {
+    Default = "default",
+    DelimitedText = "delimitedText",
+    Json = "json",
+    JsonArray = "jsonArray",
+    JsonLines = "jsonLines",
+    Markdown = "markdown",
+    Text = "text"
+}
+
+// @public
+export enum KnownBlobIndexerPDFTextRotationAlgorithm {
+    DetectAngles = "detectAngles",
+    None = "none"
+}
+
+// @public
+export enum KnownCharFilterName {
+    HtmlStrip = "html_strip"
+}
+
+// @public
+export enum KnownChatCompletionExtraParametersBehavior {
+    Drop = "drop",
+    Error = "error",
+    PassThrough = "passThrough"
+}
+
+// @public
+export enum KnownChatCompletionResponseFormatType {
+    JsonObject = "jsonObject",
+    JsonSchema = "jsonSchema",
+    Text = "text"
+}
+
+// @public
+export enum KnownCjkBigramTokenFilterScripts {
+    Han = "han",
+    Hangul = "hangul",
+    Hiragana = "hiragana",
+    Katakana = "katakana"
+}
+
+// @public
+export enum KnownContentUnderstandingSkillChunkingUnit {
+    Characters = "characters"
+}
+
+// @public
+export enum KnownContentUnderstandingSkillExtractionOptions {
+    Images = "images",
+    LocationMetadata = "locationMetadata"
+}
+
+// @public
+export enum KnownCustomEntityLookupSkillLanguage {
+    Da = "da",
+    De = "de",
+    En = "en",
+    Es = "es",
+    Fi = "fi",
+    Fr = "fr",
+    It = "it",
+    Ko = "ko",
+    Pt = "pt"
+}
+
+// @public
+export enum KnownDocumentIntelligenceLayoutSkillChunkingUnit {
+    Characters = "characters"
+}
+
+// @public
+export enum KnownDocumentIntelligenceLayoutSkillExtractionOptions {
+    Images = "images",
+    LocationMetadata = "locationMetadata"
+}
+
+// @public
+export enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth {
+    H1 = "h1",
+    H2 = "h2",
+    H3 = "h3",
+    H4 = "h4",
+    H5 = "h5",
+    H6 = "h6"
+}
+
+// @public
+export enum KnownDocumentIntelligenceLayoutSkillOutputFormat {
+    Markdown = "markdown",
+    Text = "text"
+}
+
+// @public
+export enum KnownDocumentIntelligenceLayoutSkillOutputMode {
+    OneToMany = "oneToMany"
+}
+
+// @public
+export enum KnownEdgeNGramTokenFilterSide {
+    Back = "back",
+    Front = "front"
+}
+
+// @public
+export enum KnownEntityCategory {
+    Datetime = "datetime",
+    Email = "email",
+    Location = "location",
+    Organization = "organization",
+    Person = "person",
+    Quantity = "quantity",
+    Url = "url"
+}
+
+// @public
+export enum KnownEntityRecognitionSkillLanguage {
+    Ar = "ar",
+    Cs = "cs",
+    Da = "da",
+    De = "de",
+    El = "el",
+    En = "en",
+    Es = "es",
+    Fi = "fi",
+    Fr = "fr",
+    Hu = "hu",
+    It = "it",
+    Ja = "ja",
+    Ko = "ko",
+    Nl = "nl",
+    No = "no",
+    Pl = "pl",
+    PtBR = "pt-BR",
+    PtPT = "pt-PT",
+    Ru = "ru",
+    Sv = "sv",
+    Tr = "tr",
+    ZhHans = "zh-Hans",
+    ZhHant = "zh-Hant"
+}
+
+// @public
+export enum KnownImageAnalysisSkillLanguage {
+    Ar = "ar",
+    Az = "az",
+    Bg = "bg",
+    Bs = "bs",
+    Ca = "ca",
+    Cs = "cs",
+    Cy = "cy",
+    Da = "da",
+    De = "de",
+    El = "el",
+    En = "en",
+    Es = "es",
+    Et = "et",
+    Eu = "eu",
+    Fi = "fi",
+    Fr = "fr",
+    Ga = "ga",
+    Gl = "gl",
+    He = "he",
+    Hi = "hi",
+    Hr = "hr",
+    Hu = "hu",
+    Id = "id",
+    It = "it",
+    Ja = "ja",
+    Kk = "kk",
+    Ko = "ko",
+    Lt = "lt",
+    Lv = "lv",
+    Mk = "mk",
+    Ms = "ms",
+    Nb = "nb",
+    Nl = "nl",
+    Pl = "pl",
+    Prs = "prs",
+    Pt = "pt",
+    PtBR = "pt-BR",
+    PtPT = "pt-PT",
+    Ro = "ro",
+    Ru = "ru",
+    Sk = "sk",
+    Sl = "sl",
+    SrCyrl = "sr-Cyrl",
+    SrLatn = "sr-Latn",
+    Sv = "sv",
+    Th = "th",
+    Tr = "tr",
+    Uk = "uk",
+    Vi = "vi",
+    Zh = "zh",
+    ZhHans = "zh-Hans",
+    ZhHant = "zh-Hant"
+}
+
+// @public
+export enum KnownImageDetail {
+    Celebrities = "celebrities",
+    Landmarks = "landmarks"
+}
+
+// @public
+export enum KnownIndexerExecutionEnvironment {
+    Private = "private",
+    Standard = "standard"
+}
+
+// @public
+export enum KnownIndexerExecutionStatus {
+    InProgress = "inProgress",
+    Reset = "reset",
+    Success = "success",
+    TransientFailure = "transientFailure"
+}
+
+// @public
+export enum KnownIndexerExecutionStatusDetail {
+    ResetDocs = "resetDocs",
+    Resync = "resync"
+}
+
+// @public
+export enum KnownIndexerPermissionOption {
GroupIds = "groupIds", + RbacScope = "rbacScope", + UserIds = "userIds" +} + +// @public +export enum KnownIndexerResyncOption { + Permissions = "permissions" +} + +// @public +export enum KnownIndexerStatus { + Error = "error", + Running = "running", + Unknown = "unknown" +} + +// @public +export enum KnownIndexingMode { + IndexingAllDocs = "indexingAllDocs", + IndexingResetDocs = "indexingResetDocs", + IndexingResync = "indexingResync" +} + +// @public +export enum KnownIndexProjectionMode { + IncludeIndexingParentDocuments = "includeIndexingParentDocuments", + SkipIndexingParentDocuments = "skipIndexingParentDocuments" +} + +// @public +export enum KnownKeyPhraseExtractionSkillLanguage { + Da = "da", + De = "de", + En = "en", + Es = "es", + Fi = "fi", + Fr = "fr", + It = "it", + Ja = "ja", + Ko = "ko", + Nl = "nl", + No = "no", + Pl = "pl", + PtBR = "pt-BR", + PtPT = "pt-PT", + Ru = "ru", + Sv = "sv" +} + +// @public +export enum KnownKnowledgeBaseModelKind { + AzureOpenAI = "azureOpenAI" +} + +// @public +export enum KnownKnowledgeSourceKind { + AzureBlob = "azureBlob", + IndexedOneLake = "indexedOneLake", + IndexedSharePoint = "indexedSharePoint", + RemoteSharePoint = "remoteSharePoint", + SearchIndex = "searchIndex", + Web = "web" +} + +// @public +export enum KnownLexicalAnalyzerName { + ArLucene = "ar.lucene", + ArMicrosoft = "ar.microsoft", + BgLucene = "bg.lucene", + BgMicrosoft = "bg.microsoft", + BnMicrosoft = "bn.microsoft", + CaLucene = "ca.lucene", + CaMicrosoft = "ca.microsoft", + CsLucene = "cs.lucene", + CsMicrosoft = "cs.microsoft", + DaLucene = "da.lucene", + DaMicrosoft = "da.microsoft", + DeLucene = "de.lucene", + DeMicrosoft = "de.microsoft", + ElLucene = "el.lucene", + ElMicrosoft = "el.microsoft", + EnLucene = "en.lucene", + EnMicrosoft = "en.microsoft", + EsLucene = "es.lucene", + EsMicrosoft = "es.microsoft", + EtMicrosoft = "et.microsoft", + EuLucene = "eu.lucene", + FaLucene = "fa.lucene", + FiLucene = "fi.lucene", + FiMicrosoft = "fi.microsoft", + FrLucene = "fr.lucene", + FrMicrosoft = "fr.microsoft", + GaLucene = "ga.lucene", + GlLucene = "gl.lucene", + GuMicrosoft = "gu.microsoft", + HeMicrosoft = "he.microsoft", + HiLucene = "hi.lucene", + HiMicrosoft = "hi.microsoft", + HrMicrosoft = "hr.microsoft", + HuLucene = "hu.lucene", + HuMicrosoft = "hu.microsoft", + HyLucene = "hy.lucene", + IdLucene = "id.lucene", + IdMicrosoft = "id.microsoft", + IsMicrosoft = "is.microsoft", + ItLucene = "it.lucene", + ItMicrosoft = "it.microsoft", + JaLucene = "ja.lucene", + JaMicrosoft = "ja.microsoft", + Keyword = "keyword", + KnMicrosoft = "kn.microsoft", + KoLucene = "ko.lucene", + KoMicrosoft = "ko.microsoft", + LtMicrosoft = "lt.microsoft", + LvLucene = "lv.lucene", + LvMicrosoft = "lv.microsoft", + MlMicrosoft = "ml.microsoft", + MrMicrosoft = "mr.microsoft", + MsMicrosoft = "ms.microsoft", + NbMicrosoft = "nb.microsoft", + NlLucene = "nl.lucene", + NlMicrosoft = "nl.microsoft", + NoLucene = "no.lucene", + PaMicrosoft = "pa.microsoft", + Pattern = "pattern", + PlLucene = "pl.lucene", + PlMicrosoft = "pl.microsoft", + PtBrLucene = "pt-BR.lucene", + PtBrMicrosoft = "pt-BR.microsoft", + PtPtLucene = "pt-PT.lucene", + PtPtMicrosoft = "pt-PT.microsoft", + RoLucene = "ro.lucene", + RoMicrosoft = "ro.microsoft", + RuLucene = "ru.lucene", + RuMicrosoft = "ru.microsoft", + Simple = "simple", + SkMicrosoft = "sk.microsoft", + SlMicrosoft = "sl.microsoft", + SrCyrillicMicrosoft = "sr-cyrillic.microsoft", + SrLatinMicrosoft = "sr-latin.microsoft", + StandardAsciiFoldingLucene = 
"standardasciifolding.lucene", + StandardLucene = "standard.lucene", + Stop = "stop", + SvLucene = "sv.lucene", + SvMicrosoft = "sv.microsoft", + TaMicrosoft = "ta.microsoft", + TeMicrosoft = "te.microsoft", + ThLucene = "th.lucene", + ThMicrosoft = "th.microsoft", + TrLucene = "tr.lucene", + TrMicrosoft = "tr.microsoft", + UkMicrosoft = "uk.microsoft", + UrMicrosoft = "ur.microsoft", + ViMicrosoft = "vi.microsoft", + Whitespace = "whitespace", + ZhHansLucene = "zh-Hans.lucene", + ZhHansMicrosoft = "zh-Hans.microsoft", + ZhHantLucene = "zh-Hant.lucene", + ZhHantMicrosoft = "zh-Hant.microsoft" +} + +// @public +export enum KnownLexicalNormalizerName { + AsciiFolding = "asciifolding", + Elision = "elision", + Lowercase = "lowercase", + Standard = "standard", + Uppercase = "uppercase" +} + +// @public +export enum KnownLexicalTokenizerName { + Classic = "classic", + EdgeNGram = "edgeNGram", + Keyword = "keyword_v2", + Letter = "letter", + Lowercase = "lowercase", + MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer", + MicrosoftLanguageTokenizer = "microsoft_language_tokenizer", + NGram = "nGram", + PathHierarchy = "path_hierarchy_v2", + Pattern = "pattern", + Standard = "standard_v2", + UaxUrlEmail = "uax_url_email", + Whitespace = "whitespace" +} + +// @public +export enum KnownMarkdownHeaderDepth { + H1 = "h1", + H2 = "h2", + H3 = "h3", + H4 = "h4", + H5 = "h5", + H6 = "h6" +} + +// @public +export enum KnownMarkdownParsingSubmode { + OneToMany = "oneToMany", + OneToOne = "oneToOne" +} + +// @public +export enum KnownMicrosoftStemmingTokenizerLanguage { + Arabic = "arabic", + Bangla = "bangla", + Bulgarian = "bulgarian", + Catalan = "catalan", + Croatian = "croatian", + Czech = "czech", + Danish = "danish", + Dutch = "dutch", + English = "english", + Estonian = "estonian", + Finnish = "finnish", + French = "french", + German = "german", + Greek = "greek", + Gujarati = "gujarati", + Hebrew = "hebrew", + Hindi = "hindi", + Hungarian = "hungarian", + Icelandic = "icelandic", + Indonesian = "indonesian", + Italian = "italian", + Kannada = "kannada", + Latvian = "latvian", + Lithuanian = "lithuanian", + Malay = "malay", + Malayalam = "malayalam", + Marathi = "marathi", + NorwegianBokmaal = "norwegianBokmaal", + Polish = "polish", + Portuguese = "portuguese", + PortugueseBrazilian = "portugueseBrazilian", + Punjabi = "punjabi", + Romanian = "romanian", + Russian = "russian", + SerbianCyrillic = "serbianCyrillic", + SerbianLatin = "serbianLatin", + Slovak = "slovak", + Slovenian = "slovenian", + Spanish = "spanish", + Swedish = "swedish", + Tamil = "tamil", + Telugu = "telugu", + Turkish = "turkish", + Ukrainian = "ukrainian", + Urdu = "urdu" +} + +// @public +export enum KnownMicrosoftTokenizerLanguage { + Bangla = "bangla", + Bulgarian = "bulgarian", + Catalan = "catalan", + ChineseSimplified = "chineseSimplified", + ChineseTraditional = "chineseTraditional", + Croatian = "croatian", + Czech = "czech", + Danish = "danish", + Dutch = "dutch", + English = "english", + French = "french", + German = "german", + Greek = "greek", + Gujarati = "gujarati", + Hindi = "hindi", + Icelandic = "icelandic", + Indonesian = "indonesian", + Italian = "italian", + Japanese = "japanese", + Kannada = "kannada", + Korean = "korean", + Malay = "malay", + Malayalam = "malayalam", + Marathi = "marathi", + NorwegianBokmaal = "norwegianBokmaal", + Polish = "polish", + Portuguese = "portuguese", + PortugueseBrazilian = "portugueseBrazilian", + Punjabi = "punjabi", + Romanian = "romanian", + 
Russian = "russian", + SerbianCyrillic = "serbianCyrillic", + SerbianLatin = "serbianLatin", + Slovenian = "slovenian", + Spanish = "spanish", + Swedish = "swedish", + Tamil = "tamil", + Telugu = "telugu", + Thai = "thai", + Ukrainian = "ukrainian", + Urdu = "urdu", + Vietnamese = "vietnamese" +} + +// @public +export enum KnownOcrLineEnding { + CarriageReturn = "carriageReturn", + CarriageReturnLineFeed = "carriageReturnLineFeed", + LineFeed = "lineFeed", + Space = "space" +} + +// @public +export enum KnownOcrSkillLanguage { + Af = "af", + Anp = "anp", + Ar = "ar", + Ast = "ast", + Awa = "awa", + Az = "az", + Be = "be", + BeCyrl = "be-cyrl", + BeLatn = "be-latn", + Bfy = "bfy", + Bfz = "bfz", + Bg = "bg", + Bgc = "bgc", + Bho = "bho", + Bi = "bi", + Bns = "bns", + Br = "br", + Bra = "bra", + Brx = "brx", + Bs = "bs", + Bua = "bua", + Ca = "ca", + Ceb = "ceb", + Ch = "ch", + CnrCyrl = "cnr-cyrl", + CnrLatn = "cnr-latn", + Co = "co", + Crh = "crh", + Cs = "cs", + Csb = "csb", + Cy = "cy", + Da = "da", + De = "de", + Dhi = "dhi", + Doi = "doi", + Dsb = "dsb", + El = "el", + En = "en", + Es = "es", + Et = "et", + Eu = "eu", + Fa = "fa", + Fi = "fi", + Fil = "fil", + Fj = "fj", + Fo = "fo", + Fr = "fr", + Fur = "fur", + Fy = "fy", + Ga = "ga", + Gag = "gag", + Gd = "gd", + Gil = "gil", + Gl = "gl", + Gon = "gon", + Gv = "gv", + Gvr = "gvr", + Haw = "haw", + Hi = "hi", + Hlb = "hlb", + Hne = "hne", + Hni = "hni", + Hoc = "hoc", + Hr = "hr", + Hsb = "hsb", + Ht = "ht", + Hu = "hu", + Ia = "ia", + Id = "id", + Is = "is", + It = "it", + Iu = "iu", + Ja = "ja", + Jns = "Jns", + Jv = "jv", + Kaa = "kaa", + KaaCyrl = "kaa-cyrl", + Kac = "kac", + Kea = "kea", + Kfq = "kfq", + Kha = "kha", + KkCyrl = "kk-cyrl", + KkLatn = "kk-latn", + Kl = "kl", + Klr = "klr", + Kmj = "kmj", + Ko = "ko", + Kos = "kos", + Kpy = "kpy", + Krc = "krc", + Kru = "kru", + Ksh = "ksh", + KuArab = "ku-arab", + KuLatn = "ku-latn", + Kum = "kum", + Kw = "kw", + Ky = "ky", + La = "la", + Lb = "lb", + Lkt = "lkt", + Lt = "lt", + Mi = "mi", + Mn = "mn", + Mr = "mr", + Ms = "ms", + Mt = "mt", + Mww = "mww", + Myv = "myv", + Nap = "nap", + Nb = "nb", + Ne = "ne", + Niu = "niu", + Nl = "nl", + No = "no", + Nog = "nog", + Oc = "oc", + Os = "os", + Pa = "pa", + Pl = "pl", + Prs = "prs", + Ps = "ps", + Pt = "pt", + Quc = "quc", + Rab = "rab", + Rm = "rm", + Ro = "ro", + Ru = "ru", + Sa = "sa", + Sat = "sat", + Sck = "sck", + Sco = "sco", + Sk = "sk", + Sl = "sl", + Sm = "sm", + Sma = "sma", + Sme = "sme", + Smj = "smj", + Smn = "smn", + Sms = "sms", + So = "so", + Sq = "sq", + Sr = "sr", + SrCyrl = "sr-Cyrl", + SrLatn = "sr-Latn", + Srx = "srx", + Sv = "sv", + Sw = "sw", + Tet = "tet", + Tg = "tg", + Thf = "thf", + Tk = "tk", + To = "to", + Tr = "tr", + Tt = "tt", + Tyv = "tyv", + Ug = "ug", + Unk = "unk", + Ur = "ur", + Uz = "uz", + UzArab = "uz-arab", + UzCyrl = "uz-cyrl", + Vo = "vo", + Wae = "wae", + Xnr = "xnr", + Xsr = "xsr", + Yua = "yua", + Za = "za", + ZhHans = "zh-Hans", + ZhHant = "zh-Hant", + Zu = "zu" +} + +// @public +export enum KnownPermissionFilter { + GroupIds = "groupIds", + RbacScope = "rbacScope", + UserIds = "userIds" +} + +// @public +export enum KnownPhoneticEncoder { + BeiderMorse = "beiderMorse", + Caverphone1 = "caverphone1", + Caverphone2 = "caverphone2", + Cologne = "cologne", + DoubleMetaphone = "doubleMetaphone", + HaasePhonetik = "haasePhonetik", + KoelnerPhonetik = "koelnerPhonetik", + Metaphone = "metaphone", + Nysiis = "nysiis", + RefinedSoundex = "refinedSoundex", + Soundex = "soundex" +} + +// @public 
+export enum KnownPIIDetectionSkillMaskingMode {
+    None = "none",
+    Replace = "replace"
+}
+
+// @public
+export enum KnownRankingOrder {
+    BoostedRerankerScore = "BoostedRerankerScore",
+    RerankerScore = "RerankerScore"
+}
+
+// @public
+export enum KnownRegexFlags {
+    CanonEq = "CANON_EQ",
+    CaseInsensitive = "CASE_INSENSITIVE",
+    Comments = "COMMENTS",
+    DotAll = "DOTALL",
+    Literal = "LITERAL",
+    Multiline = "MULTILINE",
+    UnicodeCase = "UNICODE_CASE",
+    UnixLines = "UNIX_LINES"
+}
+
+// @public
+export enum KnownScoringFunctionAggregation {
+    Average = "average",
+    FirstMatching = "firstMatching",
+    Maximum = "maximum",
+    Minimum = "minimum",
+    Sum = "sum"
+}
+
+// @public
+export enum KnownScoringFunctionInterpolation {
+    Constant = "constant",
+    Linear = "linear",
+    Logarithmic = "logarithmic",
+    Quadratic = "quadratic"
+}
+
+// @public
+export enum KnownSearchFieldDataType {
+    Boolean = "Edm.Boolean",
+    Byte = "Edm.Byte",
+    Complex = "Edm.ComplexType",
+    DateTimeOffset = "Edm.DateTimeOffset",
+    Double = "Edm.Double",
+    GeographyPoint = "Edm.GeographyPoint",
+    Half = "Edm.Half",
+    Int16 = "Edm.Int16",
+    Int32 = "Edm.Int32",
+    Int64 = "Edm.Int64",
+    SByte = "Edm.SByte",
+    Single = "Edm.Single",
+    String = "Edm.String"
+}
+
+// @public
+export enum KnownSearchIndexerDataSourceType {
+    AdlsGen2 = "adlsgen2",
+    AzureBlob = "azureblob",
+    AzureSql = "azuresql",
+    AzureTable = "azuretable",
+    CosmosDb = "cosmosdb",
+    MySql = "mysql",
+    OneLake = "onelake"
+}
+
+// @public
+export enum KnownSearchIndexPermissionFilterOption {
+    Disabled = "disabled",
+    Enabled = "enabled"
+}
+
+// @public
+export enum KnownSentimentSkillLanguage {
+    Da = "da",
+    De = "de",
+    El = "el",
+    En = "en",
+    Es = "es",
+    Fi = "fi",
+    Fr = "fr",
+    It = "it",
+    Nl = "nl",
+    No = "no",
+    Pl = "pl",
+    PtPT = "pt-PT",
+    Ru = "ru",
+    Sv = "sv",
+    Tr = "tr"
+}
+
+// @public
+export enum KnownSnowballTokenFilterLanguage {
+    Armenian = "armenian",
+    Basque = "basque",
+    Catalan = "catalan",
+    Danish = "danish",
+    Dutch = "dutch",
+    English = "english",
+    Finnish = "finnish",
+    French = "french",
+    German = "german",
+    German2 = "german2",
+    Hungarian = "hungarian",
+    Italian = "italian",
+    Kp = "kp",
+    Lovins = "lovins",
+    Norwegian = "norwegian",
+    Porter = "porter",
+    Portuguese = "portuguese",
+    Romanian = "romanian",
+    Russian = "russian",
+    Spanish = "spanish",
+    Swedish = "swedish",
+    Turkish = "turkish"
+}
+
+// @public
+export enum KnownSplitSkillEncoderModelName {
+    CL100KBase = "cl100k_base",
+    P50KBase = "p50k_base",
+    P50KEdit = "p50k_edit",
+    R50KBase = "r50k_base"
+}
+
+// @public
+export enum KnownSplitSkillLanguage {
+    Am = "am",
+    Bs = "bs",
+    Cs = "cs",
+    Da = "da",
+    De = "de",
+    En = "en",
+    Es = "es",
+    Et = "et",
+    Fi = "fi",
+    Fr = "fr",
+    He = "he",
+    Hi = "hi",
+    Hr = "hr",
+    Hu = "hu",
+    Id = "id",
+    Is = "is",
+    It = "it",
+    Ja = "ja",
+    Ko = "ko",
+    Lv = "lv",
+    Nb = "nb",
+    Nl = "nl",
+    Pl = "pl",
+    Pt = "pt",
+    PtBr = "pt-br",
+    Ru = "ru",
+    Sk = "sk",
+    Sl = "sl",
+    Sr = "sr",
+    Sv = "sv",
+    Tr = "tr",
+    Ur = "ur",
+    Zh = "zh"
+}
+
+// @public
+export enum KnownSplitSkillUnit {
+    AzureOpenAITokens = "azureOpenAITokens",
+    Characters = "characters"
+}
+
+// @public
+export enum KnownStemmerTokenFilterLanguage {
+    Arabic = "arabic",
+    Armenian = "armenian",
+    Basque = "basque",
+    Brazilian = "brazilian",
+    Bulgarian = "bulgarian",
+    Catalan = "catalan",
+    Czech = "czech",
+    Danish = "danish",
+    Dutch = "dutch",
+    DutchKp = "dutchKp",
+    English = "english",
+    Finnish = "finnish",
"french", + Galician = "galician", + German = "german", + German2 = "german2", + Greek = "greek", + Hindi = "hindi", + Hungarian = "hungarian", + Indonesian = "indonesian", + Irish = "irish", + Italian = "italian", + Latvian = "latvian", + LightEnglish = "lightEnglish", + LightFinnish = "lightFinnish", + LightFrench = "lightFrench", + LightGerman = "lightGerman", + LightHungarian = "lightHungarian", + LightItalian = "lightItalian", + LightNorwegian = "lightNorwegian", + LightNynorsk = "lightNynorsk", + LightPortuguese = "lightPortuguese", + LightRussian = "lightRussian", + LightSpanish = "lightSpanish", + LightSwedish = "lightSwedish", + Lovins = "lovins", + MinimalEnglish = "minimalEnglish", + MinimalFrench = "minimalFrench", + MinimalGalician = "minimalGalician", + MinimalGerman = "minimalGerman", + MinimalNorwegian = "minimalNorwegian", + MinimalNynorsk = "minimalNynorsk", + MinimalPortuguese = "minimalPortuguese", + Norwegian = "norwegian", + Porter2 = "porter2", + Portuguese = "portuguese", + PortugueseRslp = "portugueseRslp", + PossessiveEnglish = "possessiveEnglish", + Romanian = "romanian", + Russian = "russian", + Sorani = "sorani", + Spanish = "spanish", + Swedish = "swedish", + Turkish = "turkish" +} + +// @public +export enum KnownStopwordsList { + Arabic = "arabic", + Armenian = "armenian", + Basque = "basque", + Brazilian = "brazilian", + Bulgarian = "bulgarian", + Catalan = "catalan", + Czech = "czech", + Danish = "danish", + Dutch = "dutch", + English = "english", + Finnish = "finnish", + French = "french", + Galician = "galician", + German = "german", + Greek = "greek", + Hindi = "hindi", + Hungarian = "hungarian", + Indonesian = "indonesian", + Irish = "irish", + Italian = "italian", + Latvian = "latvian", + Norwegian = "norwegian", + Persian = "persian", + Portuguese = "portuguese", + Romanian = "romanian", + Russian = "russian", + Sorani = "sorani", + Spanish = "spanish", + Swedish = "swedish", + Thai = "thai", + Turkish = "turkish" +} + +// @public +export enum KnownTextSplitMode { + Pages = "pages", + Sentences = "sentences" +} + +// @public +export enum KnownTextTranslationSkillLanguage { + Af = "af", + Ar = "ar", + Bg = "bg", + Bn = "bn", + Bs = "bs", + Ca = "ca", + Cs = "cs", + Cy = "cy", + Da = "da", + De = "de", + El = "el", + En = "en", + Es = "es", + Et = "et", + Fa = "fa", + Fi = "fi", + Fil = "fil", + Fj = "fj", + Fr = "fr", + Ga = "ga", + He = "he", + Hi = "hi", + Hr = "hr", + Ht = "ht", + Hu = "hu", + Id = "id", + Is = "is", + It = "it", + Ja = "ja", + Kn = "kn", + Ko = "ko", + Lt = "lt", + Lv = "lv", + Mg = "mg", + Mi = "mi", + Ml = "ml", + Ms = "ms", + Mt = "mt", + Mww = "mww", + Nb = "nb", + Nl = "nl", + Otq = "otq", + Pa = "pa", + Pl = "pl", + Pt = "pt", + PtBr = "pt-br", + PtPT = "pt-PT", + Ro = "ro", + Ru = "ru", + Sk = "sk", + Sl = "sl", + Sm = "sm", + SrCyrl = "sr-Cyrl", + SrLatn = "sr-Latn", + Sv = "sv", + Sw = "sw", + Ta = "ta", + Te = "te", + Th = "th", + Tlh = "tlh", + TlhLatn = "tlh-Latn", + TlhPiqd = "tlh-Piqd", + To = "to", + Tr = "tr", + Ty = "ty", + Uk = "uk", + Ur = "ur", + Vi = "vi", + Yua = "yua", + Yue = "yue", + ZhHans = "zh-Hans", + ZhHant = "zh-Hant" +} + +// @public +export enum KnownTokenCharacterKind { + Digit = "digit", + Letter = "letter", + Punctuation = "punctuation", + Symbol = "symbol", + Whitespace = "whitespace" +} + +// @public +export enum KnownTokenFilterName { + Apostrophe = "apostrophe", + ArabicNormalization = "arabic_normalization", + AsciiFolding = "asciifolding", + CjkBigram = "cjk_bigram", + CjkWidth = 
"cjk_width", + Classic = "classic", + CommonGram = "common_grams", + EdgeNGram = "edgeNGram_v2", + Elision = "elision", + GermanNormalization = "german_normalization", + HindiNormalization = "hindi_normalization", + IndicNormalization = "indic_normalization", + KeywordRepeat = "keyword_repeat", + KStem = "kstem", + Length = "length", + Limit = "limit", + Lowercase = "lowercase", + NGram = "nGram_v2", + PersianNormalization = "persian_normalization", + Phonetic = "phonetic", + PorterStem = "porter_stem", + Reverse = "reverse", + ScandinavianFoldingNormalization = "scandinavian_folding", + ScandinavianNormalization = "scandinavian_normalization", + Shingle = "shingle", + Snowball = "snowball", + SoraniNormalization = "sorani_normalization", + Stemmer = "stemmer", + Stopwords = "stopwords", + Trim = "trim", + Truncate = "truncate", + Unique = "unique", + Uppercase = "uppercase", + WordDelimiter = "word_delimiter" +} + +// @public +export enum KnownVectorEncodingFormat { + PackedBit = "packedBit" +} + +// @public +export enum KnownVectorSearchAlgorithmKind { + ExhaustiveKnn = "exhaustiveKnn", + Hnsw = "hnsw" +} + +// @public +export enum KnownVectorSearchAlgorithmMetric { + Cosine = "cosine", + DotProduct = "dotProduct", + Euclidean = "euclidean", + Hamming = "hamming" +} + +// @public +export enum KnownVectorSearchCompressionKind { + BinaryQuantization = "binaryQuantization", + ScalarQuantization = "scalarQuantization" +} + +// @public +export enum KnownVectorSearchCompressionRescoreStorageMethod { + DiscardOriginals = "discardOriginals", + PreserveOriginals = "preserveOriginals" +} + +// @public +export enum KnownVectorSearchCompressionTarget { + Int8 = "int8" +} + +// @public +export enum KnownVectorSearchVectorizerKind { + AIServicesVision = "aiServicesVision", + AML = "aml", + AzureOpenAI = "azureOpenAI", + CustomWebApi = "customWebApi" +} + +// @public +export enum KnownVisualFeature { + Adult = "adult", + Brands = "brands", + Categories = "categories", + Description = "description", + Faces = "faces", + Objects = "objects", + Tags = "tags" +} + +// @public +export interface LanguageDetectionSkill extends SearchIndexerSkill { + defaultCountryHint?: string; + modelVersion?: string; + odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill"; +} + +// @public +export interface LengthTokenFilter extends TokenFilter { + maxLength?: number; + minLength?: number; + odatatype: "#Microsoft.Azure.Search.LengthTokenFilter"; +} + +// @public +export interface LexicalAnalyzer { + name: string; + odatatype: string; +} + +// @public +export type LexicalAnalyzerName = string; + +// @public +export type LexicalAnalyzerUnion = CustomAnalyzer | PatternAnalyzer | LuceneStandardAnalyzer | StopAnalyzer | LexicalAnalyzer; + +// @public +export interface LexicalNormalizer { + name: string; + odatatype: string; +} + +// @public +export type LexicalNormalizerName = string; + +// @public +export type LexicalNormalizerUnion = CustomNormalizer | LexicalNormalizer; + +// @public +export interface LexicalTokenizer { + name: string; + odatatype: string; +} + +// @public +export type LexicalTokenizerName = string; + +// @public +export type LexicalTokenizerUnion = ClassicTokenizer | EdgeNGramTokenizer | KeywordTokenizer | MicrosoftLanguageTokenizer | MicrosoftLanguageStemmingTokenizer | NGramTokenizer | PathHierarchyTokenizer | PatternTokenizer | LuceneStandardTokenizer | UaxUrlEmailTokenizer | LexicalTokenizer; + +// @public +export interface LimitTokenFilter extends TokenFilter { + consumeAllTokens?: boolean; + 
maxTokenCount?: number; + odatatype: "#Microsoft.Azure.Search.LimitTokenFilter"; +} + +// @public +export interface ListDataSourcesResult { + dataSources: SearchIndexerDataSourceConnection[]; +} + +// @public +export interface ListIndexersResult { + indexers: SearchIndexer[]; +} + +// @public +export interface ListSkillsetsResult { + skillsets: SearchIndexerSkillset[]; +} + +// @public +export interface ListSynonymMapsResult { + synonymMaps: SynonymMap[]; +} + +// @public +export interface LuceneStandardAnalyzer extends LexicalAnalyzer { + maxTokenLength?: number; + odatatype: "#Microsoft.Azure.Search.StandardAnalyzer"; + stopwords?: string[]; +} + +// @public +export interface LuceneStandardTokenizer extends LexicalTokenizer { + maxTokenLength?: number; + odatatype: "#Microsoft.Azure.Search.StandardTokenizerV2"; +} + +// @public +export interface MagnitudeScoringFunction extends ScoringFunction { + parameters: MagnitudeScoringParameters; + type: "magnitude"; +} + +// @public +export interface MagnitudeScoringParameters { + boostingRangeEnd: number; + boostingRangeStart: number; + shouldBoostBeyondRangeByConstant?: boolean; +} + +// @public +export interface MappingCharFilter extends CharFilter { + mappings: string[]; + odatatype: "#Microsoft.Azure.Search.MappingCharFilter"; +} + +// @public +export type MarkdownHeaderDepth = string; + +// @public +export type MarkdownParsingSubmode = string; + +// @public +export interface MergeSkill extends SearchIndexerSkill { + insertPostTag?: string; + insertPreTag?: string; + odatatype: "#Microsoft.Skills.Text.MergeSkill"; +} + +// @public +export interface MicrosoftLanguageStemmingTokenizer extends LexicalTokenizer { + isSearchTokenizer?: boolean; + language?: MicrosoftStemmingTokenizerLanguage; + maxTokenLength?: number; + odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer"; +} + +// @public +export interface MicrosoftLanguageTokenizer extends LexicalTokenizer { + isSearchTokenizer?: boolean; + language?: MicrosoftTokenizerLanguage; + maxTokenLength?: number; + odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer"; +} + +// @public +export type MicrosoftStemmingTokenizerLanguage = string; + +// @public +export type MicrosoftTokenizerLanguage = string; + +// @public +export interface NativeBlobSoftDeleteDeletionDetectionPolicy extends DataDeletionDetectionPolicy { + odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"; +} + +// @public +export interface NGramTokenFilter extends TokenFilter { + maxGram?: number; + minGram?: number; + odatatype: "#Microsoft.Azure.Search.NGramTokenFilterV2"; +} + +// @public +export interface NGramTokenizer extends LexicalTokenizer { + maxGram?: number; + minGram?: number; + odatatype: "#Microsoft.Azure.Search.NGramTokenizer"; + tokenChars?: TokenCharacterKind[]; +} + +// @public +export type OcrLineEnding = string; + +// @public +export interface OcrSkill extends SearchIndexerSkill { + defaultLanguageCode?: OcrSkillLanguage; + lineEnding?: OcrLineEnding; + odatatype: "#Microsoft.Skills.Vision.OcrSkill"; + shouldDetectOrientation?: boolean; +} + +// @public +export type OcrSkillLanguage = string; + +// @public +export interface OutputFieldMappingEntry { + name: string; + targetName?: string; +} + +// @public +export interface PathHierarchyTokenizer extends LexicalTokenizer { + delimiter?: string; + maxTokenLength?: number; + numberOfTokensToSkip?: number; + odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2"; + replacement?: string; + 
reverseTokenOrder?: boolean; +} + +// @public +export interface PatternAnalyzer extends LexicalAnalyzer { + flags?: RegexFlags; + lowerCaseTerms?: boolean; + odatatype: "#Microsoft.Azure.Search.PatternAnalyzer"; + pattern?: string; + stopwords?: string[]; +} + +// @public +export interface PatternCaptureTokenFilter extends TokenFilter { + odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter"; + patterns: string[]; + preserveOriginal?: boolean; +} + +// @public +export interface PatternReplaceCharFilter extends CharFilter { + odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter"; + pattern: string; + replacement: string; +} + +// @public +export interface PatternReplaceTokenFilter extends TokenFilter { + odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter"; + pattern: string; + replacement: string; +} + +// @public +export interface PatternTokenizer extends LexicalTokenizer { + flags?: RegexFlags; + group?: number; + odatatype: "#Microsoft.Azure.Search.PatternTokenizer"; + pattern?: string; +} + +// @public +export type PermissionFilter = string; + +// @public +export type PhoneticEncoder = string; + +// @public +export interface PhoneticTokenFilter extends TokenFilter { + encoder?: PhoneticEncoder; + odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter"; + replaceOriginalTokens?: boolean; +} + +// @public +export interface PIIDetectionSkill extends SearchIndexerSkill { + defaultLanguageCode?: string; + domain?: string; + mask?: string; + maskingMode?: PIIDetectionSkillMaskingMode; + minimumPrecision?: number; + modelVersion?: string; + odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill"; + piiCategories?: string[]; +} + +// @public +export type PIIDetectionSkillMaskingMode = string; + +// @public +export type RankingOrder = string; + +// @public +export type RegexFlags = string; + +// @public +export interface RescoringOptions { + defaultOversampling?: number; + enableRescoring?: boolean; + rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod; +} + +// @public +export interface ResourceCounter { + quota?: number; + usage: number; +} + +// @public +export interface ScalarQuantizationCompression extends VectorSearchCompression { + kind: "scalarQuantization"; + parameters?: ScalarQuantizationParameters; +} + +// @public +export interface ScalarQuantizationParameters { + quantizedDataType?: VectorSearchCompressionTarget; +} + +// @public +export interface ScoringFunction { + boost: number; + fieldName: string; + interpolation?: ScoringFunctionInterpolation; + type: string; +} + +// @public +export type ScoringFunctionAggregation = string; + +// @public +export type ScoringFunctionInterpolation = string; + +// @public +export type ScoringFunctionUnion = DistanceScoringFunction | FreshnessScoringFunction | MagnitudeScoringFunction | TagScoringFunction | ScoringFunction; + +// @public +export interface ScoringProfile { + functionAggregation?: ScoringFunctionAggregation; + functions?: ScoringFunctionUnion[]; + name: string; + textWeights?: TextWeights; +} + +// @public +export interface SearchAlias { + eTag?: string; + indexes: string[]; + name: string; +} + +// @public +export interface SearchField { + analyzerName?: LexicalAnalyzerName; + facetable?: boolean; + fields?: SearchField[]; + filterable?: boolean; + indexAnalyzerName?: LexicalAnalyzerName; + key?: boolean; + name: string; + normalizerName?: LexicalNormalizerName; + permissionFilter?: PermissionFilter; + retrievable?: boolean; + searchable?: boolean; + searchAnalyzerName?: 
LexicalAnalyzerName; + sensitivityLabel?: boolean; + sortable?: boolean; + stored?: boolean; + synonymMapNames?: string[]; + type: SearchFieldDataType; + vectorEncodingFormat?: VectorEncodingFormat; + vectorSearchDimensions?: number; + vectorSearchProfileName?: string; +} + +// @public +export type SearchFieldDataType = string; + +// @public +export interface SearchIndex { + analyzers?: LexicalAnalyzerUnion[]; + charFilters?: CharFilterUnion[]; + corsOptions?: CorsOptions; + defaultScoringProfile?: string; + description?: string; + encryptionKey?: SearchResourceEncryptionKey; + eTag?: string; + fields: SearchField[]; + name: string; + normalizers?: LexicalNormalizerUnion[]; + permissionFilterOption?: SearchIndexPermissionFilterOption; + purviewEnabled?: boolean; + scoringProfiles?: ScoringProfile[]; + semanticSearch?: SemanticSearch; + similarity?: SimilarityAlgorithmUnion; + suggesters?: SearchSuggester[]; + tokenFilters?: TokenFilterUnion[]; + tokenizers?: LexicalTokenizerUnion[]; + vectorSearch?: VectorSearch; +} + +// @public +export interface SearchIndexer { + cache?: SearchIndexerCache; + dataSourceName: string; + description?: string; + encryptionKey?: SearchResourceEncryptionKey; + eTag?: string; + fieldMappings?: FieldMapping[]; + isDisabled?: boolean; + name: string; + outputFieldMappings?: FieldMapping[]; + parameters?: IndexingParameters; + schedule?: IndexingSchedule; + skillsetName?: string; + targetIndexName: string; +} + +// @public +export interface SearchIndexerCache { + enableReprocessing?: boolean; + id?: string; + identity?: SearchIndexerDataIdentityUnion; + storageConnectionString?: string; +} + +// @public +export interface SearchIndexerDataContainer { + name: string; + query?: string; +} + +// @public +export interface SearchIndexerDataIdentity { + odatatype: string; +} + +// @public +export type SearchIndexerDataIdentityUnion = SearchIndexerDataNoneIdentity | SearchIndexerDataUserAssignedIdentity | SearchIndexerDataIdentity; + +// @public +export interface SearchIndexerDataNoneIdentity extends SearchIndexerDataIdentity { + odatatype: "#Microsoft.Azure.Search.DataNoneIdentity"; +} + +// @public +export interface SearchIndexerDataSourceConnection { + container: SearchIndexerDataContainer; + credentials: DataSourceCredentials; + dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion; + dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion; + description?: string; + encryptionKey?: SearchResourceEncryptionKey; + eTag?: string; + identity?: SearchIndexerDataIdentityUnion; + indexerPermissionOptions?: IndexerPermissionOption[]; + name: string; + subType?: string; + type: SearchIndexerDataSourceType; +} + +// @public +export type SearchIndexerDataSourceType = string; + +// @public +export interface SearchIndexerDataUserAssignedIdentity extends SearchIndexerDataIdentity { + odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity"; + resourceId: string; +} + +// @public +export interface SearchIndexerError { + details?: string; + documentationLink?: string; + errorMessage: string; + key?: string; + name?: string; + statusCode: number; +} + +// @public +export interface SearchIndexerIndexProjection { + parameters?: SearchIndexerIndexProjectionsParameters; + selectors: SearchIndexerIndexProjectionSelector[]; +} + +// @public +export interface SearchIndexerIndexProjectionSelector { + mappings: InputFieldMappingEntry[]; + parentKeyFieldName: string; + sourceContext: string; + targetIndexName: string; +} + +// @public +export interface 
SearchIndexerIndexProjectionsParameters { + additionalProperties?: Record; + projectionMode?: IndexProjectionMode; +} + +// @public +export interface SearchIndexerKnowledgeStore { + identity?: SearchIndexerDataIdentityUnion; + parameters?: SearchIndexerKnowledgeStoreParameters; + projections: SearchIndexerKnowledgeStoreProjection[]; + storageConnectionString: string; +} + +// @public +export interface SearchIndexerKnowledgeStoreBlobProjectionSelector extends SearchIndexerKnowledgeStoreProjectionSelector { + storageContainer: string; +} + +// @public +export interface SearchIndexerKnowledgeStoreFileProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector { +} + +// @public +export interface SearchIndexerKnowledgeStoreObjectProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector { +} + +// @public +export interface SearchIndexerKnowledgeStoreParameters { + additionalProperties?: Record; + synthesizeGeneratedKeyName?: boolean; +} + +// @public +export interface SearchIndexerKnowledgeStoreProjection { + files?: SearchIndexerKnowledgeStoreFileProjectionSelector[]; + objects?: SearchIndexerKnowledgeStoreObjectProjectionSelector[]; + tables?: SearchIndexerKnowledgeStoreTableProjectionSelector[]; +} + +// @public +export interface SearchIndexerKnowledgeStoreProjectionSelector { + generatedKeyName?: string; + inputs?: InputFieldMappingEntry[]; + referenceKeyName?: string; + source?: string; + sourceContext?: string; +} + +// @public +export interface SearchIndexerKnowledgeStoreTableProjectionSelector extends SearchIndexerKnowledgeStoreProjectionSelector { + tableName: string; +} + +// @public +export interface SearchIndexerLimits { + maxDocumentContentCharactersToExtract?: number; + maxDocumentExtractionSize?: number; + maxRunTime?: string; +} + +// @public +export interface SearchIndexerSkill { + context?: string; + description?: string; + inputs: InputFieldMappingEntry[]; + name?: string; + odatatype: string; + outputs: OutputFieldMappingEntry[]; +} + +// @public +export interface SearchIndexerSkillset { + cognitiveServicesAccount?: CognitiveServicesAccountUnion; + description?: string; + encryptionKey?: SearchResourceEncryptionKey; + eTag?: string; + indexProjection?: SearchIndexerIndexProjection; + knowledgeStore?: SearchIndexerKnowledgeStore; + name: string; + skills: SearchIndexerSkillUnion[]; +} + +// @public +export type SearchIndexerSkillUnion = ConditionalSkill | KeyPhraseExtractionSkill | OcrSkill | ImageAnalysisSkill | LanguageDetectionSkill | ShaperSkill | MergeSkill | EntityRecognitionSkill | SentimentSkill | SentimentSkillV3 | EntityLinkingSkill | EntityRecognitionSkillV3 | PIIDetectionSkill | SplitSkill | CustomEntityLookupSkill | TextTranslationSkill | DocumentExtractionSkill | DocumentIntelligenceLayoutSkill | WebApiSkill | AzureMachineLearningSkill | AzureOpenAIEmbeddingSkill | VisionVectorizeSkill | ContentUnderstandingSkill | ChatCompletionSkill | SearchIndexerSkill; + +// @public +export interface SearchIndexerStatus { + currentState?: IndexerCurrentState; + executionHistory: IndexerExecutionResult[]; + lastResult?: IndexerExecutionResult; + limits: SearchIndexerLimits; + name: string; + runtime?: IndexerRuntime; + status: IndexerStatus; +} + +// @public +export interface SearchIndexerWarning { + details?: string; + documentationLink?: string; + key?: string; + message: string; + name?: string; +} + +// @public +export interface SearchIndexKnowledgeSource extends KnowledgeSource { + // (undocumented) + kind: "searchIndex"; + 
searchIndexParameters: SearchIndexKnowledgeSourceParameters; +} + +// @public +export interface SearchIndexKnowledgeSourceParameters { + searchIndexName: string; + sourceDataSelect?: string; +} + +// @public +export type SearchIndexPermissionFilterOption = string; + +// @public +export interface SearchResourceEncryptionKey { + accessCredentials?: AzureActiveDirectoryApplicationCredentials; + identity?: SearchIndexerDataIdentityUnion; + keyName: string; + keyVersion?: string; + vaultUri: string; +} + +// @public +export interface SearchServiceStatistics { + counters: ServiceCounters; + indexersRuntime?: ServiceIndexersRuntime; + limits: ServiceLimits; +} + +// @public +export interface SearchSuggester { + name: string; + searchMode: "analyzingInfixMatching"; + sourceFields: string[]; +} + +// @public +export interface SemanticConfiguration { + flightingOptIn?: boolean; + name: string; + prioritizedFields: SemanticPrioritizedFields; + rankingOrder?: RankingOrder; +} + +// @public +export interface SemanticField { + name: string; +} + +// @public +export interface SemanticPrioritizedFields { + contentFields?: SemanticField[]; + keywordsFields?: SemanticField[]; + titleField?: SemanticField; +} + +// @public +export interface SemanticSearch { + configurations?: SemanticConfiguration[]; + defaultConfigurationName?: string; +} + +// @public +export interface SentimentSkill extends SearchIndexerSkill { + defaultLanguageCode?: SentimentSkillLanguage; + odatatype: "#Microsoft.Skills.Text.SentimentSkill"; +} + +// @public +export type SentimentSkillLanguage = string; + +// @public +export interface SentimentSkillV3 extends SearchIndexerSkill { + defaultLanguageCode?: string; + includeOpinionMining?: boolean; + modelVersion?: string; + odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill"; +} + +// @public +export interface ServiceCounters { + aliasCounter: ResourceCounter; + dataSourceCounter: ResourceCounter; + documentCounter: ResourceCounter; + indexCounter: ResourceCounter; + indexerCounter: ResourceCounter; + skillsetCounter: ResourceCounter; + storageSizeCounter: ResourceCounter; + synonymMapCounter: ResourceCounter; + vectorIndexSizeCounter: ResourceCounter; +} + +// @public +export interface ServiceLimits { + maxComplexCollectionFieldsPerIndex?: number; + maxComplexObjectsInCollectionsPerDocument?: number; + maxCumulativeIndexerRuntimeSeconds?: number; + maxFieldNestingDepthPerIndex?: number; + maxFieldsPerIndex?: number; + maxStoragePerIndexInBytes?: number; +} + +// @public +export interface ShaperSkill extends SearchIndexerSkill { + odatatype: "#Microsoft.Skills.Util.ShaperSkill"; +} + +// @public +export interface ShingleTokenFilter extends TokenFilter { + filterToken?: string; + maxShingleSize?: number; + minShingleSize?: number; + odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter"; + outputUnigrams?: boolean; + outputUnigramsIfNoShingles?: boolean; + tokenSeparator?: string; +} + +// @public +export interface SimilarityAlgorithm { + odatatype: string; +} + +// @public +export type SimilarityAlgorithmUnion = ClassicSimilarity | BM25Similarity | SimilarityAlgorithm; + +// @public +export interface SkillNames { + skillNames?: string[]; +} + +// @public +export interface SnowballTokenFilter extends TokenFilter { + language: SnowballTokenFilterLanguage; + odatatype: "#Microsoft.Azure.Search.SnowballTokenFilter"; +} + +// @public +export type SnowballTokenFilterLanguage = string; + +// @public +export interface SoftDeleteColumnDeletionDetectionPolicy extends DataDeletionDetectionPolicy 
{ + odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"; + softDeleteColumnName?: string; + softDeleteMarkerValue?: string; +} + +// @public +export interface SplitSkill extends SearchIndexerSkill { + azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters; + defaultLanguageCode?: SplitSkillLanguage; + maximumPageLength?: number; + maximumPagesToTake?: number; + odatatype: "#Microsoft.Skills.Text.SplitSkill"; + pageOverlapLength?: number; + textSplitMode?: TextSplitMode; + unit?: SplitSkillUnit; +} + +// @public +export type SplitSkillEncoderModelName = string; + +// @public +export type SplitSkillLanguage = string; + +// @public +export type SplitSkillUnit = string; + +// @public +export interface SqlIntegratedChangeTrackingPolicy extends DataChangeDetectionPolicy { + odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"; +} + +// @public +export interface StemmerOverrideTokenFilter extends TokenFilter { + odatatype: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter"; + rules: string[]; +} + +// @public +export interface StemmerTokenFilter extends TokenFilter { + language: StemmerTokenFilterLanguage; + odatatype: "#Microsoft.Azure.Search.StemmerTokenFilter"; +} + +// @public +export type StemmerTokenFilterLanguage = string; + +// @public +export interface StopAnalyzer extends LexicalAnalyzer { + odatatype: "#Microsoft.Azure.Search.StopAnalyzer"; + stopwords?: string[]; +} + +// @public +export type StopwordsList = string; + +// @public +export interface StopwordsTokenFilter extends TokenFilter { + ignoreCase?: boolean; + odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter"; + removeTrailingStopWords?: boolean; + stopwords?: string[]; + stopwordsList?: StopwordsList; +} + +// @public +export interface SynonymMap { + encryptionKey?: SearchResourceEncryptionKey; + eTag?: string; + format: "solr"; + name: string; + synonyms: string; +} + +// @public +export interface SynonymTokenFilter extends TokenFilter { + expand?: boolean; + ignoreCase?: boolean; + odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter"; + synonyms: string[]; +} + +// @public +export interface TagScoringFunction extends ScoringFunction { + parameters: TagScoringParameters; + type: "tag"; +} + +// @public +export interface TagScoringParameters { + tagsParameter: string; +} + +// @public +export type TextSplitMode = string; + +// @public +export interface TextTranslationSkill extends SearchIndexerSkill { + defaultFromLanguageCode?: TextTranslationSkillLanguage; + defaultToLanguageCode: TextTranslationSkillLanguage; + odatatype: "#Microsoft.Skills.Text.TranslationSkill"; + suggestedFrom?: TextTranslationSkillLanguage; +} + +// @public +export type TextTranslationSkillLanguage = string; + +// @public +export interface TextWeights { + weights: Record; +} + +// @public +export type TokenCharacterKind = string; + +// @public +export interface TokenFilter { + name: string; + odatatype: string; +} + +// @public +export type TokenFilterName = string; + +// @public +export type TokenFilterUnion = AsciiFoldingTokenFilter | CjkBigramTokenFilter | CommonGramTokenFilter | DictionaryDecompounderTokenFilter | EdgeNGramTokenFilter | ElisionTokenFilter | KeepTokenFilter | KeywordMarkerTokenFilter | LengthTokenFilter | LimitTokenFilter | NGramTokenFilter | PatternCaptureTokenFilter | PatternReplaceTokenFilter | PhoneticTokenFilter | ShingleTokenFilter | SnowballTokenFilter | StemmerTokenFilter | StemmerOverrideTokenFilter | StopwordsTokenFilter | SynonymTokenFilter | 
TruncateTokenFilter | UniqueTokenFilter | WordDelimiterTokenFilter | TokenFilter; + +// @public +export interface TruncateTokenFilter extends TokenFilter { + length?: number; + odatatype: "#Microsoft.Azure.Search.TruncateTokenFilter"; +} + +// @public +export interface UaxUrlEmailTokenizer extends LexicalTokenizer { + maxTokenLength?: number; + odatatype: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer"; +} + +// @public +export interface UniqueTokenFilter extends TokenFilter { + odatatype: "#Microsoft.Azure.Search.UniqueTokenFilter"; + onlyOnSamePosition?: boolean; +} + +// @public +export type VectorEncodingFormat = string; + +// @public +export interface VectorSearch { + algorithms?: VectorSearchAlgorithmConfigurationUnion[]; + compressions?: VectorSearchCompressionUnion[]; + profiles?: VectorSearchProfile[]; + vectorizers?: VectorSearchVectorizerUnion[]; +} + +// @public +export interface VectorSearchAlgorithmConfiguration { + kind: VectorSearchAlgorithmKind; + name: string; +} + +// @public +export type VectorSearchAlgorithmConfigurationUnion = HnswAlgorithmConfiguration | ExhaustiveKnnAlgorithmConfiguration | VectorSearchAlgorithmConfiguration; + +// @public +export type VectorSearchAlgorithmKind = string; + +// @public +export type VectorSearchAlgorithmMetric = string; + +// @public +export interface VectorSearchCompression { + compressionName: string; + defaultOversampling?: number; + kind: VectorSearchCompressionKind; + rerankWithOriginalVectors?: boolean; + rescoringOptions?: RescoringOptions; + truncationDimension?: number; +} + +// @public +export type VectorSearchCompressionKind = string; + +// @public +export type VectorSearchCompressionRescoreStorageMethod = string; + +// @public +export type VectorSearchCompressionTarget = string; + +// @public +export type VectorSearchCompressionUnion = ScalarQuantizationCompression | BinaryQuantizationCompression | VectorSearchCompression; + +// @public +export interface VectorSearchProfile { + algorithmConfigurationName: string; + compressionName?: string; + name: string; + vectorizerName?: string; +} + +// @public +export interface VectorSearchVectorizer { + kind: VectorSearchVectorizerKind; + vectorizerName: string; +} + +// @public +export type VectorSearchVectorizerKind = string; + +// @public +export type VectorSearchVectorizerUnion = AzureOpenAIVectorizer | WebApiVectorizer | AIServicesVisionVectorizer | AzureMachineLearningVectorizer | VectorSearchVectorizer; + +// @public +export interface VisionVectorizeSkill extends SearchIndexerSkill { + modelVersion: string; + odatatype: "#Microsoft.Skills.Vision.VectorizeSkill"; +} + +// @public +export type VisualFeature = string; + +// @public +export interface WebApiHttpHeaders { + additionalProperties?: Record; +} + +// @public +export interface WebApiSkill extends SearchIndexerSkill { + authIdentity?: SearchIndexerDataIdentityUnion; + authResourceId?: string; + batchSize?: number; + degreeOfParallelism?: number; + httpHeaders?: Record; + httpMethod?: string; + odatatype: "#Microsoft.Skills.Custom.WebApiSkill"; + timeout?: string; + uri: string; +} + +// @public +export interface WebApiVectorizer extends VectorSearchVectorizer { + kind: "customWebApi"; + webApiParameters?: WebApiVectorizerParameters; +} + +// @public +export interface WebApiVectorizerParameters { + authIdentity?: SearchIndexerDataIdentityUnion; + authResourceId?: string; + httpHeaders?: Record; + httpMethod?: string; + timeout?: string; + url?: string; +} + +// @public +export interface WordDelimiterTokenFilter extends 
TokenFilter { + catenateAll?: boolean; + catenateNumbers?: boolean; + catenateWords?: boolean; + generateNumberParts?: boolean; + generateWordParts?: boolean; + odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter"; + preserveOriginal?: boolean; + protectedWords?: string[]; + splitOnCaseChange?: boolean; + splitOnNumerics?: boolean; + stemEnglishPossessive?: boolean; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-models-azure-search-documents-knowledgeBase-node.api.md b/sdk/search/search-documents/review/search-documents-models-azure-search-documents-knowledgeBase-node.api.md new file mode 100644 index 000000000000..c64be40678e0 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-models-azure-search-documents-knowledgeBase-node.api.md @@ -0,0 +1,296 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). + +```ts + +// @public +export interface AzureBlobKnowledgeSourceParams extends KnowledgeSourceParams { + kind: "azureBlob"; +} + +// @public +export interface IndexedOneLakeKnowledgeSourceParams extends KnowledgeSourceParams { + kind: "indexedOneLake"; +} + +// @public +export interface IndexedSharePointKnowledgeSourceParams extends KnowledgeSourceParams { + kind: "indexedSharePoint"; +} + +// @public +export interface KnowledgeBaseActivityRecord { + elapsedMs?: number; + error?: KnowledgeBaseErrorDetail; + id: number; + type: KnowledgeBaseActivityRecordType; +} + +// @public +export type KnowledgeBaseActivityRecordUnion = KnowledgeBaseModelQueryPlanningActivityRecord | KnowledgeBaseModelAnswerSynthesisActivityRecord | KnowledgeBaseAgenticReasoningActivityRecord | KnowledgeBaseActivityRecord; + +// @public +export interface KnowledgeBaseAgenticReasoningActivityRecord extends KnowledgeBaseActivityRecord { + reasoningTokens?: number; + retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion; + type: "agenticReasoning"; +} + +// @public +export interface KnowledgeBaseAzureBlobReference extends KnowledgeBaseReference { + blobUrl?: string; + type: "azureBlob"; +} + +// @public +export interface KnowledgeBaseErrorAdditionalInfo { + info?: Record; + type?: string; +} + +// @public +export interface KnowledgeBaseErrorDetail { + additionalInfo?: KnowledgeBaseErrorAdditionalInfo[]; + code?: string; + details?: KnowledgeBaseErrorDetail[]; + message?: string; + target?: string; +} + +// @public +export interface KnowledgeBaseImageContent { + url: string; +} + +// @public +export interface KnowledgeBaseIndexedOneLakeReference extends KnowledgeBaseReference { + docUrl?: string; + type: "indexedOneLake"; +} + +// @public +export interface KnowledgeBaseIndexedSharePointReference extends KnowledgeBaseReference { + docUrl?: string; + type: "indexedSharePoint"; +} + +// @public +export interface KnowledgeBaseMessage { + content: KnowledgeBaseMessageContentUnion[]; + role?: string; +} + +// @public +export interface KnowledgeBaseMessageContent { + type: KnowledgeBaseMessageContentType; +} + +// @public +export type KnowledgeBaseMessageContentType = string; + +// @public +export type KnowledgeBaseMessageContentUnion = KnowledgeBaseMessageTextContent | KnowledgeBaseMessageImageContent | KnowledgeBaseMessageContent; + +// @public +export interface KnowledgeBaseMessageImageContent extends KnowledgeBaseMessageContent { + image: KnowledgeBaseImageContent; + type: "image"; +} + +// @public +export 
interface KnowledgeBaseMessageTextContent extends KnowledgeBaseMessageContent { + text: string; + type: "text"; +} + +// @public +export interface KnowledgeBaseModelAnswerSynthesisActivityRecord extends KnowledgeBaseActivityRecord { + inputTokens?: number; + outputTokens?: number; + type: "modelAnswerSynthesis"; +} + +// @public +export interface KnowledgeBaseModelQueryPlanningActivityRecord extends KnowledgeBaseActivityRecord { + inputTokens?: number; + outputTokens?: number; + type: "modelQueryPlanning"; +} + +// @public +export interface KnowledgeBaseReference { + activitySource: number; + id: string; + rerankerScore?: number; + sourceData?: Record; + type: KnowledgeBaseReferenceType; +} + +// @public +export type KnowledgeBaseReferenceUnion = KnowledgeBaseSearchIndexReference | KnowledgeBaseAzureBlobReference | KnowledgeBaseIndexedSharePointReference | KnowledgeBaseIndexedOneLakeReference | KnowledgeBaseWebReference | KnowledgeBaseRemoteSharePointReference | KnowledgeBaseReference; + +// @public +export interface KnowledgeBaseRemoteSharePointReference extends KnowledgeBaseReference { + searchSensitivityLabelInfo?: SharePointSensitivityLabelInfo; + type: "remoteSharePoint"; + webUrl: string; +} + +// @public +export interface KnowledgeBaseRetrievalRequest { + includeActivity?: boolean; + intents?: KnowledgeRetrievalIntentUnion[]; + knowledgeSourceParams?: KnowledgeSourceParamsUnion[]; + maxOutputSize?: number; + maxRuntimeInSeconds?: number; + messages?: KnowledgeBaseMessage[]; + outputMode?: KnowledgeRetrievalOutputMode; + retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion; +} + +// @public +export interface KnowledgeBaseRetrievalResponse { + activity?: KnowledgeBaseActivityRecordUnion[]; + references?: KnowledgeBaseReferenceUnion[]; + response?: KnowledgeBaseMessage[]; +} + +// @public +export interface KnowledgeBaseSearchIndexReference extends KnowledgeBaseReference { + docKey?: string; + type: "searchIndex"; +} + +// @public +export interface KnowledgeBaseWebReference extends KnowledgeBaseReference { + title?: string; + type: "web"; + url: string; +} + +// @public +export interface KnowledgeRetrievalHighReasoningEffort extends KnowledgeRetrievalReasoningEffort { + kind: "high"; +} + +// @public +export interface KnowledgeRetrievalIntent { + type: KnowledgeRetrievalIntentType; +} + +// @public +export type KnowledgeRetrievalIntentType = string; + +// @public +export type KnowledgeRetrievalIntentUnion = KnowledgeRetrievalSemanticIntent | KnowledgeRetrievalIntent; + +// @public +export interface KnowledgeRetrievalLowReasoningEffort extends KnowledgeRetrievalReasoningEffort { + kind: "low"; +} + +// @public +export interface KnowledgeRetrievalMediumReasoningEffort extends KnowledgeRetrievalReasoningEffort { + kind: "medium"; +} + +// @public +export interface KnowledgeRetrievalMinimalReasoningEffort extends KnowledgeRetrievalReasoningEffort { + kind: "minimal"; +} + +// @public +export type KnowledgeRetrievalOutputMode = string; + +// @public +export interface KnowledgeRetrievalReasoningEffort { + kind: KnowledgeRetrievalReasoningEffortKind; +} + +// @public +export type KnowledgeRetrievalReasoningEffortKind = string; + +// @public +export type KnowledgeRetrievalReasoningEffortUnion = KnowledgeRetrievalMinimalReasoningEffort | KnowledgeRetrievalLowReasoningEffort | KnowledgeRetrievalMediumReasoningEffort | KnowledgeRetrievalHighReasoningEffort | KnowledgeRetrievalReasoningEffort; + +// @public +export interface KnowledgeRetrievalSemanticIntent extends 
KnowledgeRetrievalIntent { + search: string; + type: "semantic"; +} + +// @public +export interface KnowledgeSourceParams { + alwaysQuerySource?: boolean; + includeReferences?: boolean; + includeReferenceSourceData?: boolean; + kind: KnowledgeSourceKind; + knowledgeSourceName: string; + rerankerThreshold?: number; +} + +// @public +export type KnowledgeSourceParamsUnion = SearchIndexKnowledgeSourceParams | AzureBlobKnowledgeSourceParams | IndexedSharePointKnowledgeSourceParams | IndexedOneLakeKnowledgeSourceParams | WebKnowledgeSourceParams | RemoteSharePointKnowledgeSourceParams | KnowledgeSourceParams; + +// @public +export enum KnownKnowledgeBaseMessageContentType { + Image = "image", + Text = "text" +} + +// @public +export enum KnownKnowledgeRetrievalIntentType { + Semantic = "semantic" +} + +// @public +export enum KnownKnowledgeRetrievalOutputMode { + AnswerSynthesis = "answerSynthesis", + ExtractiveData = "extractiveData" +} + +// @public +export enum KnownKnowledgeRetrievalReasoningEffortKind { + High = "high", + Low = "low", + Medium = "medium", + Minimal = "minimal" +} + +// @public +export interface RemoteSharePointKnowledgeSourceParams extends KnowledgeSourceParams { + containerTypeId?: string; + filterExpression?: string; + kind: "remoteSharePoint"; + resourceMetadata?: string[]; +} + +// @public +export interface SearchIndexKnowledgeSourceParams extends KnowledgeSourceParams { + filterAddOn?: string; + kind: "searchIndex"; +} + +// @public +export interface SharePointSensitivityLabelInfo { + color?: string; + displayName?: string; + isEncrypted?: boolean; + priority?: number; + sensitivityLabelId?: string; + tooltip?: string; +} + +// @public +export interface WebKnowledgeSourceParams extends KnowledgeSourceParams { + count?: number; + freshness?: string; + kind: "web"; + language?: string; + market?: string; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-models-azure-search-documents-node.api.md b/sdk/search/search-documents/review/search-documents-models-azure-search-documents-node.api.md new file mode 100644 index 000000000000..e67d0ab27cef --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-models-azure-search-documents-node.api.md @@ -0,0 +1,572 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). 
+ +```ts + +// @public +export interface AutocompleteItem { + queryPlusText: string; + text: string; +} + +// @public +export type AutocompleteMode = string; + +// @public +export interface AutocompleteResult { + coverage?: number; + results: AutocompleteItem[]; +} + +// @public +export interface DebugInfo { + readonly queryRewrites?: QueryRewritesDebugInfo; +} + +// @public +export interface DocumentDebugInfo { + readonly innerHits?: Record; + readonly semantic?: SemanticDebugInfo; + readonly vectors?: VectorsDebugInfo; +} + +// @public +export interface ErrorAdditionalInfo { + info?: Record; + type?: string; +} + +// @public +export interface ErrorDetail { + additionalInfo?: ErrorAdditionalInfo[]; + code?: string; + details?: ErrorDetail[]; + message?: string; + target?: string; +} + +// @public +export interface ErrorResponse { + error?: ErrorDetail; +} + +// @public +export interface FacetResult { + additionalProperties?: Record; + count?: number; + readonly facets?: Record; + readonly sum?: number; +} + +// @public +export type HybridCountAndFacetMode = string; + +// @public +export interface HybridSearch { + countAndFacetMode?: HybridCountAndFacetMode; + maxTextRecallSize?: number; +} + +// @public +export interface IndexAction { + actionType?: IndexActionType; + additionalProperties?: Record; +} + +// @public +export type IndexActionType = string; + +// @public +export interface IndexDocumentsBatch { + actions: IndexAction[]; +} + +// @public +export interface IndexDocumentsResult { + results: IndexingResult[]; +} + +// @public +export interface IndexingResult { + errorMessage?: string; + key: string; + statusCode: number; + succeeded: boolean; +} + +// @public +export enum KnownAutocompleteMode { + OneTerm = "oneTerm", + OneTermWithContext = "oneTermWithContext", + TwoTerms = "twoTerms" +} + +// @public +export enum KnownHybridCountAndFacetMode { + CountAllResults = "countAllResults", + CountRetrievableResults = "countRetrievableResults" +} + +// @public +export enum KnownIndexActionType { + Delete = "delete", + Merge = "merge", + MergeOrUpload = "mergeOrUpload", + Upload = "upload" +} + +// @public +export enum KnownQueryAnswerType { + Extractive = "extractive", + None = "none" +} + +// @public +export enum KnownQueryCaptionType { + Extractive = "extractive", + None = "none" +} + +// @public +export enum KnownQueryDebugMode { + All = "all", + Disabled = "disabled", + InnerHits = "innerHits", + QueryRewrites = "queryRewrites", + Semantic = "semantic", + Vector = "vector" +} + +// @public +export enum KnownQueryLanguage { + ArEg = "ar-eg", + ArJo = "ar-jo", + ArKw = "ar-kw", + ArMa = "ar-ma", + ArSa = "ar-sa", + BgBg = "bg-bg", + BnIn = "bn-in", + CaEs = "ca-es", + CsCz = "cs-cz", + DaDk = "da-dk", + DeDe = "de-de", + ElGr = "el-gr", + EnAu = "en-au", + EnCa = "en-ca", + EnGb = "en-gb", + EnIn = "en-in", + EnUs = "en-us", + EsEs = "es-es", + EsMx = "es-mx", + EtEe = "et-ee", + EuEs = "eu-es", + FaAe = "fa-ae", + FiFi = "fi-fi", + FrCa = "fr-ca", + FrFr = "fr-fr", + GaIe = "ga-ie", + GlEs = "gl-es", + GuIn = "gu-in", + HeIl = "he-il", + HiIn = "hi-in", + HrBa = "hr-ba", + HrHr = "hr-hr", + HuHu = "hu-hu", + HyAm = "hy-am", + IdId = "id-id", + IsIs = "is-is", + ItIt = "it-it", + JaJp = "ja-jp", + KnIn = "kn-in", + KoKr = "ko-kr", + LtLt = "lt-lt", + LvLv = "lv-lv", + MlIn = "ml-in", + MrIn = "mr-in", + MsBn = "ms-bn", + MsMy = "ms-my", + NbNo = "nb-no", + NlBe = "nl-be", + NlNl = "nl-nl", + None = "none", + NoNo = "no-no", + PaIn = "pa-in", + PlPl = "pl-pl", + PtBr = "pt-br", + PtPt = 
"pt-pt", + RoRo = "ro-ro", + RuRu = "ru-ru", + SkSk = "sk-sk", + SlSl = "sl-sl", + SrBa = "sr-ba", + SrMe = "sr-me", + SrRs = "sr-rs", + SvSe = "sv-se", + TaIn = "ta-in", + TeIn = "te-in", + ThTh = "th-th", + TrTr = "tr-tr", + UkUa = "uk-ua", + UrPk = "ur-pk", + ViVn = "vi-vn", + ZhCn = "zh-cn", + ZhTw = "zh-tw" +} + +// @public +export enum KnownQueryRewritesType { + Generative = "generative", + None = "none" +} + +// @public +export enum KnownQuerySpellerType { + Lexicon = "lexicon", + None = "none" +} + +// @public +export enum KnownQueryType { + Full = "full", + Semantic = "semantic", + Simple = "simple" +} + +// @public +export enum KnownScoringStatistics { + Global = "global", + Local = "local" +} + +// @public +export enum KnownSearchMode { + All = "all", + Any = "any" +} + +// @public +export enum KnownSemanticErrorMode { + Fail = "fail", + Partial = "partial" +} + +// @public +export enum KnownSemanticErrorReason { + CapacityOverloaded = "capacityOverloaded", + MaxWaitExceeded = "maxWaitExceeded", + Transient = "transient" +} + +// @public +export enum KnownSemanticFieldState { + Partial = "partial", + Unused = "unused", + Used = "used" +} + +// @public +export enum KnownSemanticQueryRewritesResultType { + OriginalQueryOnly = "originalQueryOnly" +} + +// @public +export enum KnownSemanticSearchResultsType { + BaseResults = "baseResults", + RerankedResults = "rerankedResults" +} + +// @public +export enum KnownVectorFilterMode { + PostFilter = "postFilter", + PreFilter = "preFilter", + StrictPostFilter = "strictPostFilter" +} + +// @public +export enum KnownVectorQueryKind { + ImageBinary = "imageBinary", + ImageUrl = "imageUrl", + Text = "text", + Vector = "vector" +} + +// @public +export enum KnownVectorThresholdKind { + SearchScore = "searchScore", + VectorSimilarity = "vectorSimilarity" +} + +// @public +export interface LookupDocument { + additionalProperties?: Record; +} + +// @public +export interface QueryAnswerResult { + additionalProperties?: Record; + highlights?: string; + key?: string; + score?: number; + text?: string; +} + +// @public +export type QueryAnswerType = string; + +// @public +export interface QueryCaptionResult { + additionalProperties?: Record; + highlights?: string; + text?: string; +} + +// @public +export type QueryCaptionType = string; + +// @public +export type QueryDebugMode = string; + +// @public +export type QueryLanguage = string; + +// @public +export interface QueryResultDocumentInnerHit { + readonly ordinal?: number; + readonly vectors?: Record[]; +} + +// @public +export interface QueryResultDocumentRerankerInput { + readonly content?: string; + readonly keywords?: string; + readonly title?: string; +} + +// @public +export interface QueryResultDocumentSemanticField { + readonly name?: string; + readonly state?: SemanticFieldState; +} + +// @public +export interface QueryResultDocumentSubscores { + readonly documentBoost?: number; + readonly text?: TextResult; + readonly vectors?: Record[]; +} + +// @public +export interface QueryRewritesDebugInfo { + readonly text?: QueryRewritesValuesDebugInfo; + readonly vectors?: QueryRewritesValuesDebugInfo[]; +} + +// @public +export type QueryRewritesType = string; + +// @public +export interface QueryRewritesValuesDebugInfo { + readonly inputQuery?: string; + readonly rewrites?: string[]; +} + +// @public +export type QuerySpellerType = string; + +// @public +export type QueryType = string; + +// @public +export type ScoringStatistics = string; + +// @public +export interface SearchDocumentsResult 
{ + readonly answers?: QueryAnswerResult[]; + readonly count?: number; + readonly coverage?: number; + readonly debugInfo?: DebugInfo; + readonly facets?: Record; + readonly nextLink?: string; + readonly nextPageParameters?: SearchRequest; + readonly results: SearchResult[]; + readonly semanticPartialResponseReason?: SemanticErrorReason; + readonly semanticPartialResponseType?: SemanticSearchResultsType; + readonly semanticQueryRewritesResultType?: SemanticQueryRewritesResultType; +} + +// @public +export type SearchMode = string; + +// @public +export interface SearchRequest { + answers?: QueryAnswerType; + captions?: QueryCaptionType; + debug?: QueryDebugMode; + facets?: string[]; + filter?: string; + highlightFields?: string; + highlightPostTag?: string; + highlightPreTag?: string; + hybridSearch?: HybridSearch; + includeTotalCount?: boolean; + minimumCoverage?: number; + orderBy?: string; + queryLanguage?: QueryLanguage; + queryRewrites?: QueryRewritesType; + querySpeller?: QuerySpellerType; + queryType?: QueryType; + scoringParameters?: string[]; + scoringProfile?: string; + scoringStatistics?: ScoringStatistics; + searchFields?: string; + searchMode?: SearchMode; + searchText?: string; + select?: string; + semanticConfigurationName?: string; + semanticErrorHandling?: SemanticErrorMode; + semanticFields?: string; + semanticMaxWaitInMilliseconds?: number; + semanticQuery?: string; + sessionId?: string; + skip?: number; + top?: number; + vectorFilterMode?: VectorFilterMode; + vectorQueries?: VectorQueryUnion[]; +} + +// @public +export interface SearchResult { + additionalProperties?: Record; + captions?: QueryCaptionResult[]; + readonly documentDebugInfo?: DocumentDebugInfo[]; + highlights?: Record; + rerankerBoostedScore?: number; + rerankerScore?: number; + score: number; +} + +// @public +export interface SearchScoreThreshold extends VectorThreshold { + kind: "searchScore"; + value: number; +} + +// @public +export interface SemanticDebugInfo { + readonly contentFields?: QueryResultDocumentSemanticField[]; + readonly keywordFields?: QueryResultDocumentSemanticField[]; + readonly rerankerInput?: QueryResultDocumentRerankerInput; + readonly titleField?: QueryResultDocumentSemanticField; +} + +// @public +export type SemanticErrorMode = string; + +// @public +export type SemanticErrorReason = string; + +// @public +export type SemanticFieldState = string; + +// @public +export type SemanticQueryRewritesResultType = string; + +// @public +export type SemanticSearchResultsType = string; + +// @public +export interface SingleVectorFieldResult { + readonly searchScore?: number; + readonly vectorSimilarity?: number; +} + +// @public +export interface SuggestDocumentsResult { + coverage?: number; + results: SuggestResult[]; +} + +// @public +export interface SuggestResult { + additionalProperties?: Record; + text: string; +} + +// @public +export interface TextResult { + readonly searchScore?: number; +} + +// @public +export type VectorFilterMode = string; + +// @public +export interface VectorizableImageBinaryQuery extends VectorQuery { + base64Image?: string; + kind: "imageBinary"; +} + +// @public +export interface VectorizableImageUrlQuery extends VectorQuery { + kind: "imageUrl"; + url?: string; +} + +// @public +export interface VectorizableTextQuery extends VectorQuery { + kind: "text"; + queryRewrites?: QueryRewritesType; + text: string; +} + +// @public +export interface VectorizedQuery extends VectorQuery { + kind: "vector"; + vector: number[]; +} + +// @public +export interface 
VectorQuery { + exhaustive?: boolean; + fields?: string; + filterOverride?: string; + kind: VectorQueryKind; + kNearestNeighbors?: number; + oversampling?: number; + perDocumentVectorLimit?: number; + threshold?: VectorThresholdUnion; + weight?: number; +} + +// @public +export type VectorQueryKind = string; + +// @public +export type VectorQueryUnion = VectorizedQuery | VectorizableTextQuery | VectorizableImageUrlQuery | VectorizableImageBinaryQuery | VectorQuery; + +// @public +export interface VectorsDebugInfo { + readonly subscores?: QueryResultDocumentSubscores; +} + +// @public +export interface VectorSimilarityThreshold extends VectorThreshold { + kind: "vectorSimilarity"; + value: number; +} + +// @public +export interface VectorThreshold { + kind: VectorThresholdKind; +} + +// @public +export type VectorThresholdKind = string; + +// @public +export type VectorThresholdUnion = VectorSimilarityThreshold | SearchScoreThreshold | VectorThreshold; + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-models-node.api.md b/sdk/search/search-documents/review/search-documents-models-node.api.md new file mode 100644 index 000000000000..78dce7085cd6 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-models-node.api.md @@ -0,0 +1,222 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). + +```ts + +// @public +export interface AIServices { + apiKey?: string; + uri: string; +} + +// @public +export interface CompletedSynchronizationState { + endTime: Date; + itemsSkipped: number; + itemsUpdatesFailed: number; + itemsUpdatesProcessed: number; + startTime: Date; +} + +// @public +export interface IndexedOneLakeKnowledgeSource extends KnowledgeSource { + indexedOneLakeParameters: IndexedOneLakeKnowledgeSourceParameters; + // (undocumented) + kind: "indexedOneLake"; +} + +// @public +export interface IndexedOneLakeKnowledgeSourceParameters { + fabricWorkspaceId: string; + ingestionParameters?: KnowledgeSourceIngestionParameters; + lakehouseId: string; + targetPath?: string; +} + +// @public +export interface IndexedSharePointKnowledgeSource extends KnowledgeSource { + indexedSharePointParameters: IndexedSharePointKnowledgeSourceParameters; + // (undocumented) + kind: "indexedSharePoint"; +} + +// @public +export interface IndexedSharePointKnowledgeSourceParameters { + connectionString: string; + containerName: string; + identity?: SearchIndexerDataIdentityUnion; + ingestionParameters?: KnowledgeSourceIngestionParameters; + query?: string; +} + +// @public +export interface IndexerRuntime { + beginningTime: Date; + endingTime: Date; + remainingSeconds?: number; + usedSeconds: number; +} + +// @public +export type KnowledgeBaseActivityRecordType = string; + +// @public +export type KnowledgeBaseReferenceType = string; + +// @public +export type KnowledgeSourceContentExtractionMode = string; + +// @public +export interface KnowledgeSourceIngestionParameters { + aiServices?: AIServices; + allowSkillsetToReadFileData?: boolean; + contentExtractionMode?: KnowledgeSourceContentExtractionMode; + dataToExtract?: BlobIndexerDataToExtract; + delimitedTextDelimiter?: string; + delimitedTextHeaders?: string; + documentRoot?: string; + excludedFileNameExtensions?: string[]; + failOnUnprocessableDocument?: boolean; + failOnUnsupportedContentType?: boolean; + firstLineContainsHeaders?: boolean; + imageAction?: 
BlobIndexerImageAction; + indexedFileNameExtensions?: string[]; + indexStorageMetadataOnlyForOversizedDocuments?: boolean; + ingestionPermissionOptions?: KnowledgeSourceIngestionPermissionOption[]; + ingestionSchedule?: IndexingSchedule; + markdownHeaderDepth?: MarkdownHeaderDepth; + markdownParsingSubmode?: MarkdownParsingSubmode; + maxDocumentExtractionSize?: number; + maxItemsToExtract?: number; + parsingMode?: BlobIndexerParsingMode; + pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm; +} + +// @public +export type KnowledgeSourceIngestionPermissionOption = string; + +// @public +export interface KnowledgeSourceStatistics { + averageItemsProcessedPerSynchronization: number; + averageSynchronizationDuration: string; + totalSynchronization: number; +} + +// @public +export interface KnowledgeSourceStatus { + createdResources?: CreatedResources; + currentSynchronizationState?: SynchronizationState; + lastSynchronizationState?: CompletedSynchronizationState; + statistics?: KnowledgeSourceStatistics; + synchronizationStatus?: KnowledgeSourceSynchronizationStatus; +} + +// @public +export type KnowledgeSourceSynchronizationStatus = string; + +// @public +export enum KnownKnowledgeBaseActivityRecordType { + AgenticReasoning = "agenticReasoning", + AzureBlob = "azureBlob", + IndexedOneLake = "indexedOneLake", + IndexedSharePoint = "indexedSharePoint", + ModelAnswerSynthesis = "modelAnswerSynthesis", + ModelQueryPlanning = "modelQueryPlanning", + RemoteSharePoint = "remoteSharePoint", + SearchIndex = "searchIndex", + Web = "web" +} + +// @public +export enum KnownKnowledgeBaseReferenceType { + AzureBlob = "azureBlob", + IndexedOneLake = "indexedOneLake", + IndexedSharePoint = "indexedSharePoint", + RemoteSharePoint = "remoteSharePoint", + SearchIndex = "searchIndex", + Web = "web" +} + +// @public +export enum KnownKnowledgeSourceContentExtractionMode { + Minimal = "minimal", + Standard = "standard" +} + +// @public +export enum KnownKnowledgeSourceIngestionPermissionOption { + GroupIds = "groupIds", + RbacScope = "rbacScope", + UserIds = "userIds" +} + +// @public +export enum KnownKnowledgeSourceSynchronizationStatus { + Active = "active", + Creating = "creating", + Deleting = "deleting" +} + +// @public +export enum KnownVersions { + V20251101Preview = "2025-11-01-preview" +} + +// @public +export interface RemoteSharePointKnowledgeSource extends KnowledgeSource { + // (undocumented) + kind: "remoteSharePoint"; + remoteSharePointParameters: RemoteSharePointKnowledgeSourceParameters; +} + +// @public +export interface RemoteSharePointKnowledgeSourceParameters { + containerTypeId?: string; + filterExpression?: string; + resourceMetadata?: string[]; +} + +// @public +export interface ServiceIndexersRuntime { + beginningTime: Date; + endingTime: Date; + remainingSeconds?: number; + usedSeconds: number; +} + +// @public +export interface SynchronizationState { + itemsSkipped: number; + itemsUpdatesFailed: number; + itemsUpdatesProcessed: number; + startTime: Date; +} + +// @public +export interface WebKnowledgeSource extends KnowledgeSource { + // (undocumented) + kind: "web"; + webParameters?: WebKnowledgeSourceParameters; +} + +// @public +export interface WebKnowledgeSourceDomain { + address: string; + includeSubpages?: boolean; +} + +// @public +export interface WebKnowledgeSourceDomains { + allowedDomains?: WebKnowledgeSourceDomain[]; + blockedDomains?: WebKnowledgeSourceDomain[]; +} + +// @public +export interface WebKnowledgeSourceParameters { + domains?: 
WebKnowledgeSourceDomains; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-node.api.md b/sdk/search/search-documents/review/search-documents-node.api.md index 91308ed39327..1e1228c88799 100644 --- a/sdk/search/search-documents/review/search-documents-node.api.md +++ b/sdk/search/search-documents/review/search-documents-node.api.md @@ -5,24 +5,16 @@ ```ts import { AzureKeyCredential } from '@azure/core-auth'; -import * as coreClient from '@azure/core-client'; -import type { ExtendedCommonClientOptions } from '@azure/core-http-compat'; +import type { ClientOptions } from '@azure-rest/core-client'; import type { KeyCredential } from '@azure/core-auth'; -import type { OperationOptions } from '@azure/core-client'; -import type { PagedAsyncIterableIterator } from '@azure/core-paging'; -import type { Pipeline } from '@azure/core-rest-pipeline'; +import type { OperationOptions } from '@azure-rest/core-client'; +import { Pipeline } from '@azure/core-rest-pipeline'; import type { RestError } from '@azure/core-rest-pipeline'; import type { TokenCredential } from '@azure/core-auth'; // @public export type AIFoundryModelCatalogName = string; -// @public -export interface AIServices { - apiKey?: string; - uri: string; -} - // @public export interface AIServicesAccountIdentity extends BaseCognitiveServicesAccount { identity?: SearchIndexerDataIdentity; @@ -51,15 +43,17 @@ export interface AIServicesVisionVectorizer extends BaseVectorSearchVectorizer { parameters?: AIServicesVisionParameters; } +// Warning: (ae-forgotten-export) The symbol "PagedAsyncIterableIterator" needs to be exported by the entry point index.d.ts +// // @public export type AliasIterator = PagedAsyncIterableIterator; // @public export interface AnalyzedTokenInfo { - readonly endOffset: number; - readonly position: number; - readonly startOffset: number; - readonly token: string; + endOffset: number; + position: number; + startOffset: number; + token: string; } // @public @@ -88,12 +82,12 @@ export interface AsciiFoldingTokenFilter extends BaseTokenFilter { // @public export interface AutocompleteItem { - readonly queryPlusText: string; - readonly text: string; + queryPlusText: string; + text: string; } // @public -export type AutocompleteMode = "oneTerm" | "twoTerms" | "oneTermWithContext"; +export type AutocompleteMode = string; // @public export type AutocompleteOptions = OperationOptions & AutocompleteRequest; @@ -112,8 +106,8 @@ export interface AutocompleteRequest { // @public export interface AutocompleteResult { - readonly coverage?: number; - readonly results: AutocompleteItem[]; + coverage?: number; + results: AutocompleteItem[]; } // @public @@ -184,7 +178,7 @@ export interface AzureOpenAIParameters { resourceUrl?: string; } -// @public (undocumented) +// @public export interface AzureOpenAITokenizerParameters { allowedSpecialTokens?: string[]; encoderModelName?: SplitSkillEncoderModelName; @@ -205,23 +199,23 @@ export interface BaseAzureMachineLearningVectorizerParameters { // @public export interface BaseCharFilter { name: string; - odatatype: "#Microsoft.Azure.Search.MappingCharFilter" | "#Microsoft.Azure.Search.PatternReplaceCharFilter"; + odatatype: string; } // @public export interface BaseCognitiveServicesAccount { description?: string; - odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices" | "#Microsoft.Azure.Search.CognitiveServicesByKey" | "#Microsoft.Azure.Search.AIServicesByKey" | 
"#Microsoft.Azure.Search.AIServicesByIdentity"; + odatatype: string; } // @public export interface BaseDataChangeDetectionPolicy { - odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy" | "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"; + odatatype: string; } // @public export interface BaseDataDeletionDetectionPolicy { - odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" | "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"; + odatatype: string; } // @public @@ -229,17 +223,20 @@ export interface BaseKnowledgeBaseActivityRecord { elapsedMs?: number; error?: KnowledgeBaseErrorDetail; id: number; - type: "KnowledgeBaseRetrievalActivityRecord" | "searchIndex" | "azureBlob" | "indexedSharePoint" | "indexedOneLake" | "web" | "remoteSharePoint" | "modelQueryPlanning" | "modelAnswerSynthesis" | "agenticReasoning"; + // Warning: (ae-forgotten-export) The symbol "KnowledgeBaseActivityRecordType" needs to be exported by the entry point index.d.ts + type: KnowledgeBaseActivityRecordType; } // @public export interface BaseKnowledgeBaseMessageContent { - type: "text" | "image"; + // Warning: (ae-forgotten-export) The symbol "KnowledgeBaseMessageContentType" needs to be exported by the entry point index.d.ts + type: KnowledgeBaseMessageContentType; } // @public export interface BaseKnowledgeBaseModel { - kind: "azureOpenAI"; + // Warning: (ae-forgotten-export) The symbol "KnowledgeBaseModelKind" needs to be exported by the entry point index.d.ts + kind: KnowledgeBaseModelKind; } // @public @@ -247,29 +244,21 @@ export interface BaseKnowledgeBaseReference { activitySource: number; id: string; rerankerScore?: number; - sourceData?: { - [propertyName: string]: any; - }; - type: "searchIndex" | "azureBlob" | "indexedSharePoint" | "indexedOneLake" | "web" | "remoteSharePoint"; + sourceData?: Record; + // Warning: (ae-forgotten-export) The symbol "KnowledgeBaseReferenceType" needs to be exported by the entry point index.d.ts + type: KnowledgeBaseReferenceType; } // @public -export interface BaseKnowledgeBaseRetrievalActivityRecord extends BaseKnowledgeBaseActivityRecord { - count?: number; - knowledgeSourceName?: string; - queryTime?: Date; - type: "KnowledgeBaseRetrievalActivityRecord" | "searchIndex" | "azureBlob" | "indexedSharePoint" | "indexedOneLake" | "web" | "remoteSharePoint"; -} - -// @public (undocumented) -export type BaseKnowledgeRetrievalIntent = KnowledgeRetrievalIntent | KnowledgeRetrievalSemanticIntent; +export type BaseKnowledgeRetrievalIntent = KnowledgeRetrievalSemanticIntent | KnowledgeRetrievalIntent; // @public export type BaseKnowledgeRetrievalOutputMode = string; -// @public (undocumented) +// @public export interface BaseKnowledgeRetrievalReasoningEffort { - kind: "minimal" | "low" | "medium"; + // Warning: (ae-forgotten-export) The symbol "KnowledgeRetrievalReasoningEffortKind" needs to be exported by the entry point index.d.ts + kind: KnowledgeRetrievalReasoningEffortKind; } // @public @@ -281,37 +270,33 @@ export interface BaseKnowledgeSource { name: string; } -// @public (undocumented) +// @public export interface BaseKnowledgeSourceParams { alwaysQuerySource?: boolean; includeReferences?: boolean; includeReferenceSourceData?: boolean; - kind: "searchIndex" | "azureBlob" | "indexedSharePoint" | "indexedOneLake" | "web" | "remoteSharePoint"; + // Warning: (ae-forgotten-export) The symbol "KnowledgeSourceKind" needs to be exported by the entry point index.d.ts + kind: KnowledgeSourceKind; 
knowledgeSourceName: string; rerankerThreshold?: number; } -// @public -export interface BaseKnowledgeSourceVectorizer { - kind: "azureOpenAI"; -} - // @public export interface BaseLexicalAnalyzer { name: string; - odatatype: "#Microsoft.Azure.Search.CustomAnalyzer" | "#Microsoft.Azure.Search.PatternAnalyzer" | "#Microsoft.Azure.Search.StandardAnalyzer" | "#Microsoft.Azure.Search.StopAnalyzer"; + odatatype: string; } // @public export interface BaseLexicalNormalizer { name: string; - odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; + odatatype: string; } // @public export interface BaseLexicalTokenizer { name: string; - odatatype: "#Microsoft.Azure.Search.ClassicTokenizer" | "#Microsoft.Azure.Search.EdgeNGramTokenizer" | "#Microsoft.Azure.Search.KeywordTokenizer" | "#Microsoft.Azure.Search.KeywordTokenizerV2" | "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer" | "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer" | "#Microsoft.Azure.Search.NGramTokenizer" | "#Microsoft.Azure.Search.PathHierarchyTokenizerV2" | "#Microsoft.Azure.Search.PatternTokenizer" | "#Microsoft.Azure.Search.StandardTokenizer" | "#Microsoft.Azure.Search.StandardTokenizerV2" | "#Microsoft.Azure.Search.UaxUrlEmailTokenizer"; + odatatype: string; } // @public @@ -319,12 +304,12 @@ export interface BaseScoringFunction { boost: number; fieldName: string; interpolation?: ScoringFunctionInterpolation; - type: "distance" | "freshness" | "magnitude" | "tag"; + type: string; } // @public export interface BaseSearchIndexerDataIdentity { - odatatype: "#Microsoft.Azure.Search.DataNoneIdentity" | "#Microsoft.Azure.Search.DataUserAssignedIdentity"; + odatatype: string; } // @public @@ -333,7 +318,7 @@ export interface BaseSearchIndexerSkill { description?: string; inputs: InputFieldMappingEntry[]; name?: string; - odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Custom.ChatCompletionSkill" | "#Microsoft.Skills.Util.ContentUnderstandingSkill" | "#Microsoft.Skills.Custom.AmlSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill" | "#Microsoft.Skills.Vision.VectorizeSkill"; + odatatype: string; outputs: OutputFieldMappingEntry[]; } @@ -368,7 +353,7 @@ export interface BaseSearchRequestOptions; dataToExtract?: string; odatatype: "#Microsoft.Skills.Util.DocumentExtractionSkill"; parsingMode?: string; @@ -860,7 +801,7 @@ export interface EdgeNGramTokenFilter { } // @public -export type EdgeNGramTokenFilterSide = "front" | "back"; +export type EdgeNGramTokenFilterSide = string; // @public export interface EdgeNGramTokenizer extends BaseLexicalTokenizer { @@ -947,15 +888,9 @@ export interface ExtractiveQueryCaption { // @public export interface FacetResult { - [property: string]: 
any;
- readonly avg?: number;
- readonly cardinality?: number;
- readonly count?: number;
- readonly facets?: {
- [propertyName: string]: FacetResult[];
- };
- readonly max?: number;
- readonly min?: number;
+ additionalProperties?: Record<string, any>;
+ count?: number;
+ readonly facets?: Record<string, FacetResult[]>;
readonly sum?: number;
}

// @public
@@ -969,9 +904,7 @@ export interface FieldMapping {
// @public
export interface FieldMappingFunction {
name: string;
- parameters?: {
- [propertyName: string]: any;
- };
+ parameters?: Record<string, any>;
}

// @public
@@ -1027,17 +960,10 @@ export type GetIndexOptions = OperationOptions;

// @public
export type GetIndexStatisticsOptions = OperationOptions;

-// @public
-export interface GetIndexStatsSummaryOptionalParams extends coreClient.OperationOptions {
-}
-
// @public (undocumented)
export interface GetIndexStatsSummaryOptions extends OperationOptions {
}

-// @public
-export type GetIndexStatsSummaryResponse = ListIndexStatsSummary;
-
// @public (undocumented)
export interface GetKnowledgeBaseOptions extends OperationOptions {
}
@@ -1111,7 +1037,7 @@ export type ImageAnalysisSkillLanguage = `${KnownImageAnalysisSkillLanguage}`;
export type ImageDetail = `${KnownImageDetail}`;

// @public
-export type IndexActionType = "upload" | "merge" | "mergeOrUpload" | "delete";
+export type IndexActionType = string;

// @public
export type IndexDocumentsAction = {
@@ -1141,7 +1067,7 @@ export interface IndexDocumentsOptions extends OperationOptions {

// @public
export interface IndexDocumentsResult {
- readonly results: IndexingResult[];
+ results: IndexingResult[];
}

// @public
@@ -1166,9 +1092,6 @@ export interface IndexedOneLakeKnowledgeSourceParams extends BaseKnowledgeSource
kind: "indexedOneLake";
}

-// @public
-export type IndexedSharePointContainerName = string;
-
// @public
export interface IndexedSharePointKnowledgeSource extends BaseKnowledgeSource {
indexedSharePointParameters: IndexedSharePointKnowledgeSourceParameters;
@@ -1178,7 +1101,6 @@ export interface IndexedSharePointKnowledgeSource extends BaseKnowledgeSource {
// @public
export interface IndexedSharePointKnowledgeSourceParameters {
connectionString: string;
- containerName: IndexedSharePointContainerName;
readonly createdResources?: {
[propertyName: string]: string;
};
@@ -1196,22 +1118,24 @@ export type IndexerExecutionEnvironment = `${KnownIndexerExecutionEnvironment}`;

// @public
export interface IndexerExecutionResult {
- readonly endTime?: Date;
- readonly errorMessage?: string;
- readonly errors: SearchIndexerError[];
- readonly failedItemCount: number;
- readonly finalTrackingState?: string;
- readonly initialTrackingState?: string;
- readonly itemCount: number;
+ // Warning: (ae-forgotten-export) The symbol "IndexerCurrentState" needs to be exported by the entry point index.d.ts
+ readonly currentState?: IndexerCurrentState;
+ endTime?: Date;
+ errorMessage?: string;
+ errors: SearchIndexerError[];
+ failedItemCount: number;
+ finalTrackingState?: string;
+ initialTrackingState?: string;
+ itemCount: number;
readonly mode?: IndexingMode;
- readonly startTime?: Date;
- readonly status: IndexerExecutionStatus;
+ startTime?: Date;
+ status: IndexerExecutionStatus;
readonly statusDetail?: IndexerExecutionStatusDetail;
- readonly warnings: SearchIndexerWarning[];
+ warnings: SearchIndexerWarning[];
}

// @public
-export type IndexerExecutionStatus = "transientFailure" | "success" | "inProgress" | "reset";
+export type IndexerExecutionStatus = string;

// @public
export type IndexerExecutionStatusDetail = string;
@@ -1223,32 +1147,7 
@@ export type IndexerPermissionOption = string; export type IndexerResyncOption = string; // @public -export interface IndexerRuntime { - beginningTime: Date; - endingTime: Date; - remainingSeconds?: number; - usedSeconds: number; -} - -// @public -export interface IndexersResyncOptionalParams extends coreClient.OperationOptions { -} - -// @public -export interface IndexerState { - readonly allDocsFinalTrackingState?: string; - readonly allDocsInitialTrackingState?: string; - readonly mode?: IndexingMode; - readonly resetDatasourceDocumentIds?: string[]; - readonly resetDocsFinalTrackingState?: string; - readonly resetDocsInitialTrackingState?: string; - readonly resetDocumentKeys?: string[]; - readonly resyncFinalTrackingState?: string; - readonly resyncInitialTrackingState?: string; -} - -// @public -export type IndexerStatus = "unknown" | "error" | "running"; +export type IndexerStatus = string; // @public export type IndexingMode = string; @@ -1286,10 +1185,10 @@ export interface IndexingParametersConfiguration { // @public export interface IndexingResult { - readonly errorMessage?: string; - readonly key: string; - readonly statusCode: number; - readonly succeeded: boolean; + errorMessage?: string; + key: string; + statusCode: number; + succeeded: boolean; } // @public @@ -1310,9 +1209,9 @@ export type IndexProjectionMode = string; // @public export interface IndexStatisticsSummary { readonly documentCount: number; - name: string; + readonly name: string; readonly storageSize: number; - readonly vectorIndexSize: number; + readonly vectorIndexSize?: number; } // @public @@ -1376,34 +1275,22 @@ export interface KnowledgeBase { models: KnowledgeBaseModel[]; name: string; // (undocumented) - outputMode?: KnowledgeRetrievalOutputMode; + outputMode?: BaseKnowledgeRetrievalOutputMode; retrievalInstructions?: string; // (undocumented) - retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffort; + retrievalReasoningEffort?: BaseKnowledgeRetrievalReasoningEffort; } -// @public (undocumented) -export type KnowledgeBaseActivityRecord = BaseKnowledgeBaseActivityRecord | KnowledgeBaseRetrievalActivityRecord | KnowledgeBaseModelQueryPlanningActivityRecord | KnowledgeBaseModelAnswerSynthesisActivityRecord | KnowledgeBaseAgenticReasoningActivityRecord; +// @public +export type KnowledgeBaseActivityRecord = KnowledgeBaseModelQueryPlanningActivityRecord | KnowledgeBaseModelAnswerSynthesisActivityRecord | KnowledgeBaseAgenticReasoningActivityRecord | BaseKnowledgeBaseActivityRecord; // @public export interface KnowledgeBaseAgenticReasoningActivityRecord extends BaseKnowledgeBaseActivityRecord { reasoningTokens?: number; - // (undocumented) retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion; type: "agenticReasoning"; } -// @public -export interface KnowledgeBaseAzureBlobActivityArguments { - search?: string; -} - -// @public -export interface KnowledgeBaseAzureBlobActivityRecord extends BaseKnowledgeBaseRetrievalActivityRecord { - azureBlobArguments?: KnowledgeBaseAzureBlobActivityArguments; - type: "azureBlob"; -} - // @public export interface KnowledgeBaseAzureBlobReference extends BaseKnowledgeBaseReference { blobUrl?: string; @@ -1418,28 +1305,17 @@ export interface KnowledgeBaseAzureOpenAIModel extends BaseKnowledgeBaseModel { // @public export interface KnowledgeBaseErrorAdditionalInfo { - readonly info?: Record; - readonly type?: string; + info?: Record; + type?: string; } // @public export interface KnowledgeBaseErrorDetail { - readonly additionalInfo?: 
KnowledgeBaseErrorAdditionalInfo[]; - readonly code?: string; - readonly details?: KnowledgeBaseErrorDetail[]; - readonly message?: string; - readonly target?: string; -} - -// @public -export interface KnowledgeBaseIndexedOneLakeActivityArguments { - search?: string; -} - -// @public -export interface KnowledgeBaseIndexedOneLakeActivityRecord extends BaseKnowledgeBaseRetrievalActivityRecord { - indexedOneLakeArguments?: KnowledgeBaseIndexedOneLakeActivityArguments; - type: "indexedOneLake"; + additionalInfo?: KnowledgeBaseErrorAdditionalInfo[]; + code?: string; + details?: KnowledgeBaseErrorDetail[]; + message?: string; + target?: string; } // @public @@ -1448,17 +1324,6 @@ export interface KnowledgeBaseIndexedOneLakeReference extends BaseKnowledgeBaseR type: "indexedOneLake"; } -// @public -export interface KnowledgeBaseIndexedSharePointActivityArguments { - search?: string; -} - -// @public -export interface KnowledgeBaseIndexedSharePointActivityRecord extends BaseKnowledgeBaseRetrievalActivityRecord { - indexedSharePointArguments?: KnowledgeBaseIndexedSharePointActivityArguments; - type: "indexedSharePoint"; -} - // @public export interface KnowledgeBaseIndexedSharePointReference extends BaseKnowledgeBaseReference { docUrl?: string; @@ -1470,29 +1335,22 @@ export type KnowledgeBaseIterator = PagedAsyncIterableIterator; // @public -export type KnowledgeSourceKind = string; +export type KnowledgeSourceParams = SearchIndexKnowledgeSourceParams | AzureBlobKnowledgeSourceParams | IndexedSharePointKnowledgeSourceParams | IndexedOneLakeKnowledgeSourceParams | WebKnowledgeSourceParams | RemoteSharePointKnowledgeSourceParams | BaseKnowledgeSourceParams; -// @public (undocumented) -export type KnowledgeSourceParams = BaseKnowledgeSourceParams | SearchIndexKnowledgeSourceParams | AzureBlobKnowledgeSourceParams | IndexedSharePointKnowledgeSourceParams | IndexedOneLakeKnowledgeSourceParams | WebKnowledgeSourceParams | RemoteSharePointKnowledgeSourceParams; - -// @public (undocumented) +// @public export interface KnowledgeSourceReference { name: string; } -// @public -export interface KnowledgeSourceStatistics { - averageItemsProcessedPerSynchronization: number; - averageSynchronizationDuration: string; - totalSynchronization: number; -} - -// @public -export interface KnowledgeSourceStatus { - currentSynchronizationState?: SynchronizationState; - lastSynchronizationState?: CompletedSynchronizationState; - statistics?: KnowledgeSourceStatistics; - synchronizationInterval?: string; - synchronizationStatus: KnowledgeSourceSynchronizationStatus; -} - -// @public -export type KnowledgeSourceSynchronizationStatus = string; - // @public (undocumented) export type KnowledgeSourceVectorizer = KnowledgeSourceAzureOpenAIVectorizer; @@ -1730,11 +1505,10 @@ export type KnowledgeSourceVectorizer = KnowledgeSourceAzureOpenAIVectorizer; export enum KnownAIFoundryModelCatalogName { CohereEmbedV3English = "Cohere-embed-v3-english", CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual", - CohereEmbedV4 = "Cohere-embed-v4", FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base", FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant", - OpenAIClipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", - OpenAIClipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336" + OpenAiclipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", + 
OpenAiclipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336"
}

// @public
@@ -1836,14 +1610,6 @@ export enum KnownAnalyzerNames {

// @public
export enum KnownAzureOpenAIModelName {
- Gpt41 = "gpt-4.1",
- Gpt41Mini = "gpt-4.1-mini",
- Gpt41Nano = "gpt-4.1-nano",
- Gpt4O = "gpt-4o",
- Gpt4OMini = "gpt-4o-mini",
- Gpt5 = "gpt-5",
- Gpt5Mini = "gpt-5-mini",
- Gpt5Nano = "gpt-5-nano",
TextEmbedding3Large = "text-embedding-3-large",
TextEmbedding3Small = "text-embedding-3-small",
TextEmbeddingAda002 = "text-embedding-ada-002"
@@ -2112,22 +1878,6 @@ export enum KnownKnowledgeBaseModelKind {
AzureOpenAI = "azureOpenAI"
}

-// @public
-export enum KnownKnowledgeRetrievalOutputMode {
- AnswerSynthesis = "answerSynthesis",
- ExtractiveData = "extractiveData"
-}
-
-// @public
-export enum KnownKnowledgeSourceKind {
- AzureBlob = "azureBlob",
- IndexedOneLake = "indexedOneLake",
- IndexedSharePoint = "indexedSharePoint",
- RemoteSharePoint = "remoteSharePoint",
- SearchIndex = "searchIndex",
- Web = "web"
-}
-
// @public
export enum KnownLexicalAnalyzerName {
ArLucene = "ar.lucene",
@@ -2542,7 +2292,7 @@ export enum KnownQuerySpeller {

// @public
export enum KnownRankingOrder {
BoostedRerankerScore = "BoostedRerankerScore",
- ReRankerScore = "RerankerScore"
+ RerankerScore = "RerankerScore"
}

// @public
@@ -2589,8 +2339,7 @@ export enum KnownSearchIndexerDataSourceType {
AzureTable = "azuretable",
CosmosDb = "cosmosdb",
MySql = "mysql",
- OneLake = "onelake",
- SharePoint = "sharepoint"
+ OneLake = "onelake"
}

// @public
@@ -2964,11 +2713,6 @@ export type ListIndexersOptions = OperationOptions;

// @public
export type ListIndexesOptions = OperationOptions;

-// @public
-export interface ListIndexStatsSummary {
- readonly indexesStatistics: IndexStatisticsSummary[];
-}
-
// @public (undocumented)
export interface ListKnowledgeBasesOptions extends OperationOptions {
}
@@ -3057,10 +2801,10 @@ export interface MicrosoftLanguageTokenizer extends BaseLexicalTokenizer {
}

// @public
-export type MicrosoftStemmingTokenizerLanguage = "arabic" | "bangla" | "bulgarian" | "catalan" | "croatian" | "czech" | "danish" | "dutch" | "english" | "estonian" | "finnish" | "french" | "german" | "greek" | "gujarati" | "hebrew" | "hindi" | "hungarian" | "icelandic" | "indonesian" | "italian" | "kannada" | "latvian" | "lithuanian" | "malay" | "malayalam" | "marathi" | "norwegianBokmaal" | "polish" | "portuguese" | "portugueseBrazilian" | "punjabi" | "romanian" | "russian" | "serbianCyrillic" | "serbianLatin" | "slovak" | "slovenian" | "spanish" | "swedish" | "tamil" | "telugu" | "turkish" | "ukrainian" | "urdu";
+export type MicrosoftStemmingTokenizerLanguage = string;

// @public
-export type MicrosoftTokenizerLanguage = "bangla" | "bulgarian" | "catalan" | "chineseSimplified" | "chineseTraditional" | "croatian" | "czech" | "danish" | "dutch" | "english" | "french" | "german" | "greek" | "gujarati" | "hindi" | "icelandic" | "indonesian" | "italian" | "japanese" | "kannada" | "korean" | "malay" | "malayalam" | "marathi" | "norwegianBokmaal" | "polish" | "portuguese" | "portugueseBrazilian" | "punjabi" | "romanian" | "russian" | "serbianCyrillic" | "serbianLatin" | "slovenian" | "spanish" | "swedish" | "tamil" | "telugu" | "thai" | "ukrainian" | "urdu" | "vietnamese";
+export type MicrosoftTokenizerLanguage = string;

// @public
export type NarrowedModel<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = (<T>() => T extends TModel ? true : false) extends <T>() => T extends never ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends object ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends any ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends unknown ? true : false ? TModel : (<T>() => T extends TFields ? true : false) extends <T>() => T extends never ? true : false ? never : (<T>() => T extends TFields ? true : false) extends <T>() => T extends SelectFields<TModel> ? true : false ? TModel : SearchPick<TModel, TFields>;
@@ -3169,7 +2913,7 @@ export interface PatternTokenizer {
export type PermissionFilter = string;

// @public
-export type PhoneticEncoder = "metaphone" | "doubleMetaphone" | "soundex" | "refinedSoundex" | "caverphone1" | "caverphone2" | "cologne" | "nysiis" | "koelnerPhonetik" | "haasePhonetik" | "beiderMorse";
+export type PhoneticEncoder = string;

// @public
export interface PhoneticTokenFilter extends BaseTokenFilter {
@@ -3198,11 +2942,11 @@ export type QueryAnswer = ExtractiveQueryAnswer;

// @public
export interface QueryAnswerResult {
- [property: string]: any;
- readonly highlights?: string;
- readonly key: string;
- readonly score: number;
- readonly text: string;
+ additionalProperties?: Record<string, any>;
+ highlights?: string;
+ key?: string;
+ score?: number;
+ text?: string;
}

// @public
@@ -3210,9 +2954,9 @@ export type QueryCaption = ExtractiveQueryCaption;

// @public
export interface QueryCaptionResult {
- [property: string]: any;
- readonly highlights?: string;
- readonly text?: string;
+ additionalProperties?: Record<string, any>;
+ highlights?: string;
+ text?: string;
}

// @public
@@ -3224,9 +2968,7 @@ export type QueryLanguage = string;

// @public
export interface QueryResultDocumentInnerHit {
readonly ordinal?: number;
- readonly vectors?: {
- [propertyName: string]: SingleVectorFieldResult;
- }[];
+ readonly vectors?: Record<string, SingleVectorFieldResult>[];
}

// @public
@@ -3246,9 +2988,7 @@ export interface QueryResultDocumentSemanticField {
export interface QueryResultDocumentSubscores {
readonly documentBoost?: number;
readonly text?: TextResult;
- readonly vectors?: {
- [propertyName: string]: SingleVectorFieldResult;
- }[];
+ readonly vectors?: Record<string, SingleVectorFieldResult>[];
}

// @public
@@ -3270,7 +3010,7 @@ export interface QueryRewritesValuesDebugInfo {
export type QuerySpeller = string;

// @public
-export type QueryType = "simple" | "full" | "semantic";
+export type QueryType = string;

// @public
export type RankingOrder = string;
@@ -3281,20 +3021,14 @@ export type RegexFlags = `${KnownRegexFlags}`;

// @public
export interface RemoteSharePointKnowledgeSource extends BaseKnowledgeSource {
kind: "remoteSharePoint";
- remoteSharePointParameters: RemoteSharePointKnowledgeSourceParameters;
}

// @public
-export interface RemoteSharePointKnowledgeSourceParameters {
+export interface RemoteSharePointKnowledgeSourceParams extends BaseKnowledgeSourceParams {
containerTypeId?: string;
filterExpression?: string;
- resourceMetadata?: string[];
-}
-
-// @public
-export interface RemoteSharePointKnowledgeSourceParams extends BaseKnowledgeSourceParams {
- filterExpressionAddOn?: string;
kind: "remoteSharePoint";
+ resourceMetadata?: string[];
}

// @public
@@ -3348,10 +3082,10 @@ export interface ScalarQuantizationParameters {
export type ScoringFunction = DistanceScoringFunction | FreshnessScoringFunction | MagnitudeScoringFunction | TagScoringFunction;

// @public
-export type ScoringFunctionAggregation = "sum" | "average" | "minimum" | "maximum" | "firstMatching" | "product";
+export type ScoringFunctionAggregation = string;

// @public
-export 
type ScoringFunctionInterpolation = "linear" | "constant" | "quadratic" | "logarithmic"; +export type ScoringFunctionInterpolation = string; // @public export interface ScoringProfile { @@ -3362,11 +3096,11 @@ export interface ScoringProfile { } // @public -export type ScoringStatistics = "local" | "global"; +export type ScoringStatistics = string; // @public export interface SearchAlias { - etag?: string; + eTag?: string; indexes: string[]; name: string; } @@ -3394,7 +3128,7 @@ export class SearchClient implements IndexDocumentsClient } // @public -export interface SearchClientOptions extends ExtendedCommonClientOptions { +export interface SearchClientOptions extends ClientOptions { // @deprecated apiVersion?: string; audience?: string; @@ -3495,7 +3229,6 @@ export class SearchIndexClient { getKnowledgeBase(knowledgeBaseName: string, options?: GetKnowledgeBaseOptions): Promise; getKnowledgeRetrievalClient(knowledgeBaseName: string, options?: KnowledgeRetrievalClientOptions): KnowledgeRetrievalClient; getKnowledgeSource(sourceName: string, options?: GetKnowledgeSourceOptions): Promise; - getKnowledgeSourceStatus(sourceName: string, options?: GetKnowledgeSourceStatusOptions): Promise; getSearchClient(indexName: string, options?: SearchClientOptions): SearchClient; getServiceStatistics(options?: GetServiceStatisticsOptions): Promise; getSynonymMap(synonymMapName: string, options?: GetSynonymMapsOptions): Promise; @@ -3511,7 +3244,7 @@ export class SearchIndexClient { } // @public -export interface SearchIndexClientOptions extends ExtendedCommonClientOptions { +export interface SearchIndexClientOptions extends ClientOptions { // @deprecated apiVersion?: string; audience?: string; @@ -3577,7 +3310,7 @@ export class SearchIndexerClient { } // @public -export interface SearchIndexerClientOptions extends ExtendedCommonClientOptions { +export interface SearchIndexerClientOptions extends ClientOptions { // @deprecated apiVersion?: string; audience?: string; @@ -3625,12 +3358,12 @@ export interface SearchIndexerDataUserAssignedIdentity extends BaseSearchIndexer // @public export interface SearchIndexerError { - readonly details?: string; - readonly documentationLink?: string; - readonly errorMessage: string; - readonly key?: string; - readonly name?: string; - readonly statusCode: number; + details?: string; + documentationLink?: string; + errorMessage: string; + key?: string; + name?: string; + statusCode: number; } // @public @@ -3700,13 +3433,15 @@ export interface SearchIndexerKnowledgeStoreTableProjectionSelector extends Sear tableName: string; } -// @public (undocumented) +// @public export interface SearchIndexerLimits { - readonly maxDocumentContentCharactersToExtract?: number; - readonly maxDocumentExtractionSize?: number; - readonly maxRunTime?: string; + maxDocumentContentCharactersToExtract?: number; + maxDocumentExtractionSize?: number; + maxRunTime?: string; } +// Warning: (ae-forgotten-export) The symbol "ContentUnderstandingSkill" needs to be exported by the entry point index.d.ts +// // @public export type SearchIndexerSkill = AzureMachineLearningSkill | AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | DocumentIntelligenceLayoutSkill | ContentUnderstandingSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | 
VisionVectorizeSkill | WebApiSkills;
@@ -3724,28 +3459,23 @@ export interface SearchIndexerSkillset {

// @public
export interface SearchIndexerStatus {
- readonly currentState?: IndexerState;
- readonly executionHistory: IndexerExecutionResult[];
- readonly lastResult?: IndexerExecutionResult;
- readonly limits: SearchIndexerLimits;
- readonly name: string;
- readonly runtime: IndexerRuntime;
- readonly status: IndexerStatus;
+ currentState?: IndexerCurrentState;
+ executionHistory: IndexerExecutionResult[];
+ lastResult?: IndexerExecutionResult;
+ limits: SearchIndexerLimits;
+ name: string;
+ // Warning: (ae-forgotten-export) The symbol "IndexerRuntime" needs to be exported by the entry point index.d.ts
+ runtime?: IndexerRuntime;
+ status: IndexerStatus;
}

// @public
export interface SearchIndexerWarning {
- readonly details?: string;
- readonly documentationLink?: string;
- readonly key?: string;
- readonly message: string;
- readonly name?: string;
-}
-
-// @public (undocumented)
-export interface SearchIndexFieldReference {
- // (undocumented)
- name: string;
+ details?: string;
+ documentationLink?: string;
+ key?: string;
+ message: string;
+ name?: string;
}

// @public
@@ -3806,10 +3536,8 @@ export interface SearchIndexKnowledgeSource extends BaseKnowledgeSource {

// @public
export interface SearchIndexKnowledgeSourceParameters {
- searchFields?: SearchIndexFieldReference[];
searchIndexName: string;
- semanticConfigurationName?: string;
- sourceDataFields?: SearchIndexFieldReference[];
+ sourceDataSelect?: string;
}

// @public
@@ -3832,7 +3560,7 @@ export interface SearchIndexStatistics {
export type SearchIterator<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = PagedAsyncIterableIterator<SearchResult<TModel, TFields>, SearchDocumentsPageResult<TModel, TFields>, ListSearchResultsPageSettings>;

// @public
-export type SearchMode = "any" | "all";
+export type SearchMode = string;

// @public
export type SearchOptions<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = OperationOptions & SearchRequestOptions<TModel, TFields>;
@@ -3887,7 +3615,9 @@ export interface SearchScoreThreshold extends BaseVectorThreshold {

// @public
export interface SearchServiceStatistics {
+ // Warning: (ae-forgotten-export) The symbol "ServiceCounters" needs to be exported by the entry point index.d.ts
counters: ServiceCounters;
+ // Warning: (ae-forgotten-export) The symbol "ServiceLimits" needs to be exported by the entry point index.d.ts
limits: ServiceLimits;
}
@@ -3930,7 +3660,6 @@ export type SemanticErrorReason = `${KnownSemanticErrorReason}`;

// @public
export interface SemanticField {
- // (undocumented)
name: string;
}
@@ -3986,29 +3715,6 @@ export interface SentimentSkillV3 extends BaseSearchIndexerSkill {
odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill";
}

-// @public
-export interface ServiceCounters {
- aliasCounter: ResourceCounter;
- dataSourceCounter: ResourceCounter;
- documentCounter: ResourceCounter;
- indexCounter: ResourceCounter;
- indexerCounter: ResourceCounter;
- skillsetCounter: ResourceCounter;
- storageSizeCounter: ResourceCounter;
- synonymMapCounter: ResourceCounter;
- vectorIndexSizeCounter: ResourceCounter;
-}
-
-// @public
-export interface ServiceLimits {
- maxComplexCollectionFieldsPerIndex?: number;
- maxComplexObjectsInCollectionsPerDocument?: number;
- maxCumulativeIndexerRuntimeSeconds?: number;
- maxFieldNestingDepthPerIndex?: number;
- maxFieldsPerIndex?: number;
- maxStoragePerIndexInBytes?: number;
-}
-
// @public
export interface ShaperSkill extends BaseSearchIndexerSkill {
odatatype: "#Microsoft.Skills.Util.ShaperSkill";
@@ -4036,9 +3742,7 @@ export interface ShingleTokenFilter extends BaseTokenFilter {
}

// @public
-export interface Similarity {
- odatatype: "#Microsoft.Azure.Search.ClassicSimilarity" | "#Microsoft.Azure.Search.BM25Similarity";
-}
+export type Similarity = ClassicSimilarity | BM25Similarity | SimilarityAlgorithm_2;

// @public
export type SimilarityAlgorithm = ClassicSimilarity | BM25Similarity;
@@ -4079,7 +3783,7 @@ export interface SnowballTokenFilter extends BaseTokenFilter {
}

// @public
-export type SnowballTokenFilterLanguage = "armenian" | "basque" | "catalan" | "danish" | "dutch" | "english" | "finnish" | "french" | "german" | "german2" | "hungarian" | "italian" | "kp" | "lovins" | "norwegian" | "porter" | "portuguese" | "romanian" | "russian" | "spanish" | "swedish" | "turkish";
+export type SnowballTokenFilterLanguage = string;

// @public
export interface SoftDeleteColumnDeletionDetectionPolicy extends BaseDataDeletionDetectionPolicy {
@@ -4127,7 +3831,7 @@ export interface StemmerTokenFilter extends BaseTokenFilter {
}

// @public
-export type StemmerTokenFilterLanguage = "arabic" | "armenian" | "basque" | "brazilian" | "bulgarian" | "catalan" | "czech" | "danish" | "dutch" | "dutchKp" | "english" | "lightEnglish" | "minimalEnglish" | "possessiveEnglish" | "porter2" | "lovins" | "finnish" | "lightFinnish" | "french" | "lightFrench" | "minimalFrench" | "galician" | "minimalGalician" | "german" | "german2" | "lightGerman" | "minimalGerman" | "greek" | "hindi" | "hungarian" | "lightHungarian" | "indonesian" | "irish" | "italian" | "lightItalian" | "sorani" | "latvian" | "norwegian" | "lightNorwegian" | "minimalNorwegian" | "lightNynorsk" | "minimalNynorsk" | "portuguese" | "lightPortuguese" | "minimalPortuguese" | "portugueseRslp" | "romanian" | "russian" | "lightRussian" | "spanish" | "lightSpanish" | "swedish" | "lightSwedish" | "turkish";
+export type StemmerTokenFilterLanguage = string;

// @public
export interface StopAnalyzer extends BaseLexicalAnalyzer {
@@ -4136,7 +3840,7 @@ export interface StopAnalyzer extends BaseLexicalAnalyzer {
}

// @public
-export type StopwordsList = "arabic" | "armenian" | "basque" | "brazilian" | "bulgarian" | "catalan" | "czech" | "danish" | "dutch" | "english" | "finnish" | "french" | "galician" | "german" | "greek" | "hindi" | "hungarian" | "indonesian" | "irish" | "italian" | "latvian" | "norwegian" | "persian" | "portuguese" | "romanian" | "russian" | "sorani" | "spanish" | "swedish" | "thai" | "turkish";
+export type StopwordsList = string;

// @public
export interface StopwordsTokenFilter extends BaseTokenFilter {
@@ -4178,14 +3882,6 @@ export type SuggestResult; };
-// @public
-export interface SynchronizationState {
- itemsSkipped: number;
- itemsUpdatesFailed: number;
- itemsUpdatesProcessed: number;
- startTime: Date;
-}
-
// @public
export interface SynonymMap {
encryptionKey?: SearchResourceEncryptionKey;
@@ -4234,9 +3930,7 @@ export type TextTranslationSkillLanguage = `${KnownTextTranslationSkillLanguage}

// @public
export interface TextWeights {
- weights: {
- [propertyName: string]: number;
- };
+ weights: Record<string, number>;
}

// @public
@@ -4247,7 +3941,7 @@ export interface TokenAuthAzureMachineLearningVectorizerParameters extends BaseA
}

// @public
-export type TokenCharacterKind = "letter" | "digit" | "whitespace" | "punctuation" | "symbol";
+export type TokenCharacterKind = string;

// @public
export type TokenFilter = AsciiFoldingTokenFilter | CjkBigramTokenFilter | CommonGramTokenFilter | DictionaryDecompounderTokenFilter | EdgeNGramTokenFilter | ElisionTokenFilter | KeepTokenFilter | 
KeywordMarkerTokenFilter | LengthTokenFilter | LimitTokenFilter | NGramTokenFilter | PatternCaptureTokenFilter | PatternReplaceTokenFilter | PhoneticTokenFilter | ShingleTokenFilter | SnowballTokenFilter | StemmerTokenFilter | StemmerOverrideTokenFilter | StopwordsTokenFilter | SynonymTokenFilter | TruncateTokenFilter | UniqueTokenFilter | WordDelimiterTokenFilter; @@ -4316,7 +4010,7 @@ export type VectorQuery = VectorizedQuery | Vecto // @public (undocumented) export type VectorQueryKind = `${KnownVectorQueryKind}`; -// @public (undocumented) +// @public export interface VectorsDebugInfo { readonly subscores?: QueryResultDocumentSubscores; } @@ -4427,26 +4121,10 @@ export interface WebApiVectorizer extends BaseVectorSearchVectorizer { // @public export interface WebKnowledgeSource extends BaseKnowledgeSource { kind: "web"; + // Warning: (ae-forgotten-export) The symbol "WebKnowledgeSourceParameters" needs to be exported by the entry point index.d.ts webParameters?: WebKnowledgeSourceParameters; } -// @public -export interface WebKnowledgeSourceDomain { - address: string; - includeSubpages?: boolean; -} - -// @public -export interface WebKnowledgeSourceDomains { - allowedDomains?: WebKnowledgeSourceDomain[]; - blockedDomains?: WebKnowledgeSourceDomain[]; -} - -// @public -export interface WebKnowledgeSourceParameters { - domains?: WebKnowledgeSourceDomains; -} - // @public export interface WebKnowledgeSourceParams extends BaseKnowledgeSourceParams { count?: number; diff --git a/sdk/search/search-documents/review/search-documents-search-api-node.api.md b/sdk/search/search-documents/review/search-documents-search-api-node.api.md new file mode 100644 index 000000000000..9dbc8cb29076 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-search-api-node.api.md @@ -0,0 +1,208 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). 
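The diff above to the flattened `search-documents-node.api.md` swaps the three entry-point clients from `ExtendedCommonClientOptions` to the `ClientOptions` shape of `@azure-rest/core-client` while keeping their constructor signatures intact. A minimal construction sketch against that surface; the endpoint and key are placeholders, and `listIndexes()` is implied by the `ListIndexesOptions` alias kept in the report rather than shown explicitly in a hunk:

```ts
import { AzureKeyCredential } from "@azure/core-auth";
import { SearchIndexClient } from "@azure/search-documents";

// SearchIndexClientOptions now extends ClientOptions from @azure-rest/core-client,
// so any pipeline customization follows the core-client option shape.
const indexClient = new SearchIndexClient(
  "https://<service>.search.windows.net", // placeholder endpoint
  new AzureKeyCredential("<api-key>"), // placeholder key
);

// listIndexes() still yields a PagedAsyncIterableIterator per the report.
async function dumpIndexNames(): Promise<void> {
  for await (const index of indexClient.listIndexes()) {
    console.log(index.name);
  }
}

dumpIndexNames().catch(console.error);
```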
+ +```ts + +import { Client } from '@azure-rest/core-client'; +import { ClientOptions } from '@azure-rest/core-client'; +import { KeyCredential } from '@azure/core-auth'; +import { OperationOptions } from '@azure-rest/core-client'; +import { TokenCredential } from '@azure/core-auth'; + +// @public +export function autocompleteGet(context: SearchContext, searchText: string, suggesterName: string, options?: AutocompleteGetOptionalParams): Promise; + +// @public +export interface AutocompleteGetOptionalParams extends OperationOptions { + autocompleteMode?: AutocompleteMode; + clientRequestId?: string; + filter?: string; + highlightPostTag?: string; + highlightPreTag?: string; + minimumCoverage?: number; + querySourceAuthorization?: string; + searchFields?: string[]; + top?: number; + useFuzzyMatching?: boolean; +} + +// @public +export function autocompletePost(context: SearchContext, searchText: string, suggesterName: string, options?: AutocompletePostOptionalParams): Promise; + +// @public +export interface AutocompletePostOptionalParams extends OperationOptions { + autocompleteMode?: AutocompleteMode; + clientRequestId?: string; + filter?: string; + highlightPostTag?: string; + highlightPreTag?: string; + minimumCoverage?: number; + querySourceAuthorization?: string; + searchFields?: string; + top?: number; + useFuzzyMatching?: boolean; +} + +// @public (undocumented) +export function createSearch(endpointParam: string, credential: KeyCredential | TokenCredential, indexName: string, options?: SearchClientOptionalParams): SearchContext; + +// @public +export function getDocument(context: SearchContext, key: string, options?: GetDocumentOptionalParams): Promise; + +// @public +export function getDocumentCount(context: SearchContext, options?: GetDocumentCountOptionalParams): Promise; + +// @public +export interface GetDocumentCountOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public +export interface GetDocumentOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; + selectedFields?: string[]; +} + +// @public +export function index(context: SearchContext, batch: IndexDocumentsBatch, options?: IndexOptionalParams): Promise; + +// @public +export interface IndexOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public +export interface SearchClientOptionalParams extends ClientOptions { + apiVersion?: string; +} + +// @public (undocumented) +export interface SearchContext extends Client { + apiVersion: string; + indexName: string; +} + +// @public +export function searchGet(context: SearchContext, options?: SearchGetOptionalParams): Promise; + +// @public +export interface SearchGetOptionalParams extends OperationOptions { + answers?: QueryAnswerType; + captions?: QueryCaptionType; + clientRequestId?: string; + debug?: QueryDebugMode; + facets?: string[]; + filter?: string; + highlightFields?: string[]; + highlightPostTag?: string; + highlightPreTag?: string; + includeTotalResultCount?: boolean; + minimumCoverage?: number; + orderBy?: string[]; + queryLanguage?: QueryLanguage; + queryRewrites?: QueryRewritesType; + querySourceAuthorization?: string; + queryType?: QueryType; + scoringParameters?: string[]; + scoringProfile?: string; + scoringStatistics?: ScoringStatistics; + searchFields?: string[]; + searchMode?: SearchMode; + searchText?: string; + select?: string[]; + semanticConfiguration?: string; + 
semanticErrorHandling?: SemanticErrorMode; + semanticFields?: string[]; + semanticMaxWaitInMilliseconds?: number; + semanticQuery?: string; + sessionId?: string; + skip?: number; + speller?: QuerySpellerType; + top?: number; +} + +// @public +export function searchPost(context: SearchContext, options?: SearchPostOptionalParams): Promise; + +// @public +export interface SearchPostOptionalParams extends OperationOptions { + answers?: QueryAnswerType; + captions?: QueryCaptionType; + clientRequestId?: string; + debug?: QueryDebugMode; + facets?: string[]; + filter?: string; + highlightFields?: string; + highlightPostTag?: string; + highlightPreTag?: string; + hybridSearch?: HybridSearch; + includeTotalCount?: boolean; + minimumCoverage?: number; + orderBy?: string; + queryLanguage?: QueryLanguage; + queryRewrites?: QueryRewritesType; + querySourceAuthorization?: string; + querySpeller?: QuerySpellerType; + queryType?: QueryType; + scoringParameters?: string[]; + scoringProfile?: string; + scoringStatistics?: ScoringStatistics; + searchFields?: string; + searchMode?: SearchMode; + searchText?: string; + select?: string; + semanticConfigurationName?: string; + semanticErrorHandling?: SemanticErrorMode; + semanticFields?: string; + semanticMaxWaitInMilliseconds?: number; + semanticQuery?: string; + sessionId?: string; + skip?: number; + top?: number; + vectorFilterMode?: VectorFilterMode; + vectorQueries?: VectorQueryUnion[]; +} + +// @public +export function suggestGet(context: SearchContext, searchText: string, suggesterName: string, options?: SuggestGetOptionalParams): Promise; + +// @public +export interface SuggestGetOptionalParams extends OperationOptions { + clientRequestId?: string; + filter?: string; + highlightPostTag?: string; + highlightPreTag?: string; + minimumCoverage?: number; + orderBy?: string[]; + querySourceAuthorization?: string; + searchFields?: string[]; + select?: string[]; + top?: number; + useFuzzyMatching?: boolean; +} + +// @public +export function suggestPost(context: SearchContext, searchText: string, suggesterName: string, options?: SuggestPostOptionalParams): Promise; + +// @public +export interface SuggestPostOptionalParams extends OperationOptions { + clientRequestId?: string; + filter?: string; + highlightPostTag?: string; + highlightPreTag?: string; + minimumCoverage?: number; + orderBy?: string; + querySourceAuthorization?: string; + searchFields?: string; + select?: string; + top?: number; + useFuzzyMatching?: boolean; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-search-node.api.md b/sdk/search/search-documents/review/search-documents-search-node.api.md new file mode 100644 index 000000000000..7937d62deab9 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-search-node.api.md @@ -0,0 +1,194 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). 
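The `search-documents-search-api-node.api.md` report above describes a context-plus-free-functions surface. A hedged usage sketch based solely on those signatures; the deep-import path is an assumption (the report does not state where the symbols are exported from), and the response is left untyped because the report's `Promise<...>` type arguments are not preserved here:

```ts
import { AzureKeyCredential } from "@azure/core-auth";
// Assumed import location; the report above only documents the symbols,
// not the subpath they are exported from.
import { createSearch, searchPost } from "@azure/search-documents/api";

async function runSearch(): Promise<void> {
  // createSearch(endpointParam, credential, indexName, options?) per the report.
  const context = createSearch(
    "https://<service>.search.windows.net", // placeholder endpoint
    new AzureKeyCredential("<api-key>"), // placeholder key
    "hotels", // placeholder index name
  );

  // searchPost takes all query parameters through its optional-params bag.
  const result = await searchPost(context, {
    searchText: "wifi",
    queryType: "simple",
    top: 5,
  });
  console.log(result);
}

runSearch().catch(console.error);
```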
+ +```ts + +import { Client } from '@azure-rest/core-client'; +import { ClientOptions } from '@azure-rest/core-client'; +import { KeyCredential } from '@azure/core-auth'; +import { OperationOptions } from '@azure-rest/core-client'; +import { Pipeline } from '@azure/core-rest-pipeline'; +import { TokenCredential } from '@azure/core-auth'; + +// @public +export interface AutocompleteGetOptionalParams extends OperationOptions { + autocompleteMode?: AutocompleteMode; + clientRequestId?: string; + filter?: string; + highlightPostTag?: string; + highlightPreTag?: string; + minimumCoverage?: number; + querySourceAuthorization?: string; + searchFields?: string[]; + top?: number; + useFuzzyMatching?: boolean; +} + +// @public +export interface AutocompletePostOptionalParams extends OperationOptions { + autocompleteMode?: AutocompleteMode; + clientRequestId?: string; + filter?: string; + highlightPostTag?: string; + highlightPreTag?: string; + minimumCoverage?: number; + querySourceAuthorization?: string; + searchFields?: string; + top?: number; + useFuzzyMatching?: boolean; +} + +// @public +export interface GetDocumentCountOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public +export interface GetDocumentOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; + selectedFields?: string[]; +} + +// @public +export interface IndexOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public (undocumented) +export class SearchClient { + constructor(endpointParam: string, credential: KeyCredential | TokenCredential, indexName: string, options?: SearchClientOptionalParams); + autocompleteGet(searchText: string, suggesterName: string, options?: AutocompleteGetOptionalParams): Promise; + autocompletePost(searchText: string, suggesterName: string, options?: AutocompletePostOptionalParams): Promise; + getDocument(key: string, options?: GetDocumentOptionalParams): Promise; + getDocumentCount(options?: GetDocumentCountOptionalParams): Promise; + index(batch: IndexDocumentsBatch, options?: IndexOptionalParams): Promise; + readonly pipeline: Pipeline; + searchGet(options?: SearchGetOptionalParams): Promise; + searchPost(options?: SearchPostOptionalParams): Promise; + suggestGet(searchText: string, suggesterName: string, options?: SuggestGetOptionalParams): Promise; + suggestPost(searchText: string, suggesterName: string, options?: SuggestPostOptionalParams): Promise; +} + +// @public +export interface SearchClientOptionalParams extends ClientOptions { + apiVersion?: string; +} + +// @public (undocumented) +export interface SearchContext extends Client { + apiVersion: string; + indexName: string; +} + +// @public +export interface SearchGetOptionalParams extends OperationOptions { + answers?: QueryAnswerType; + captions?: QueryCaptionType; + clientRequestId?: string; + debug?: QueryDebugMode; + facets?: string[]; + filter?: string; + highlightFields?: string[]; + highlightPostTag?: string; + highlightPreTag?: string; + includeTotalResultCount?: boolean; + minimumCoverage?: number; + orderBy?: string[]; + queryLanguage?: QueryLanguage; + queryRewrites?: QueryRewritesType; + querySourceAuthorization?: string; + queryType?: QueryType; + scoringParameters?: string[]; + scoringProfile?: string; + scoringStatistics?: ScoringStatistics; + searchFields?: string[]; + searchMode?: SearchMode; + searchText?: string; + select?: string[]; + 
semanticConfiguration?: string; + semanticErrorHandling?: SemanticErrorMode; + semanticFields?: string[]; + semanticMaxWaitInMilliseconds?: number; + semanticQuery?: string; + sessionId?: string; + skip?: number; + speller?: QuerySpellerType; + top?: number; +} + +// @public +export interface SearchPostOptionalParams extends OperationOptions { + answers?: QueryAnswerType; + captions?: QueryCaptionType; + clientRequestId?: string; + debug?: QueryDebugMode; + facets?: string[]; + filter?: string; + highlightFields?: string; + highlightPostTag?: string; + highlightPreTag?: string; + hybridSearch?: HybridSearch; + includeTotalCount?: boolean; + minimumCoverage?: number; + orderBy?: string; + queryLanguage?: QueryLanguage; + queryRewrites?: QueryRewritesType; + querySourceAuthorization?: string; + querySpeller?: QuerySpellerType; + queryType?: QueryType; + scoringParameters?: string[]; + scoringProfile?: string; + scoringStatistics?: ScoringStatistics; + searchFields?: string; + searchMode?: SearchMode; + searchText?: string; + select?: string; + semanticConfigurationName?: string; + semanticErrorHandling?: SemanticErrorMode; + semanticFields?: string; + semanticMaxWaitInMilliseconds?: number; + semanticQuery?: string; + sessionId?: string; + skip?: number; + top?: number; + vectorFilterMode?: VectorFilterMode; + vectorQueries?: VectorQueryUnion[]; +} + +// @public +export interface SuggestGetOptionalParams extends OperationOptions { + clientRequestId?: string; + filter?: string; + highlightPostTag?: string; + highlightPreTag?: string; + minimumCoverage?: number; + orderBy?: string[]; + querySourceAuthorization?: string; + searchFields?: string[]; + select?: string[]; + top?: number; + useFuzzyMatching?: boolean; +} + +// @public +export interface SuggestPostOptionalParams extends OperationOptions { + clientRequestId?: string; + filter?: string; + highlightPostTag?: string; + highlightPreTag?: string; + minimumCoverage?: number; + orderBy?: string; + querySourceAuthorization?: string; + searchFields?: string; + select?: string; + top?: number; + useFuzzyMatching?: boolean; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-searchIndex-api-node.api.md b/sdk/search/search-documents/review/search-documents-searchIndex-api-node.api.md new file mode 100644 index 000000000000..2e6d5853a6d1 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-searchIndex-api-node.api.md @@ -0,0 +1,288 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). 
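For comparison, the `search-documents-search-node.api.md` report above wraps the same operations in a class-based client. A sketch under the same caveats (assumed subpath, results left untyped):

```ts
import { AzureKeyCredential } from "@azure/core-auth";
// Assumed import location for the modular class client reported above;
// this is distinct from the legacy generic SearchClient<TModel>.
import { SearchClient } from "@azure/search-documents/search";

const client = new SearchClient(
  "https://<service>.search.windows.net", // placeholder endpoint
  new AzureKeyCredential("<api-key>"), // placeholder key
  "hotels", // placeholder index name
);

async function probe(): Promise<void> {
  // Per the report: getDocumentCount(options?) and
  // suggestPost(searchText, suggesterName, options?).
  const count = await client.getDocumentCount();
  const suggestions = await client.suggestPost("sea", "sg", {
    top: 3,
    useFuzzyMatching: true,
  });
  console.log(count, suggestions);
}

probe().catch(console.error);
```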
+ +```ts + +import { Client } from '@azure-rest/core-client'; +import { ClientOptions } from '@azure-rest/core-client'; +import { KeyCredential } from '@azure/core-auth'; +import { OperationOptions } from '@azure-rest/core-client'; +import { TokenCredential } from '@azure/core-auth'; + +// @public +export function analyzeText(context: SearchIndexContext, request: AnalyzeTextOptions, indexName: string, options?: AnalyzeTextOptionalParams): Promise; + +// @public +export interface AnalyzeTextOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public +export function createAlias(context: SearchIndexContext, alias: SearchAlias, options?: CreateAliasOptionalParams): Promise; + +// @public +export interface CreateAliasOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function createIndex(context: SearchIndexContext, index: SearchIndex, options?: CreateIndexOptionalParams): Promise; + +// @public +export interface CreateIndexOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function createKnowledgeBase(context: SearchIndexContext, knowledgeBase: KnowledgeBase, options?: CreateKnowledgeBaseOptionalParams): Promise; + +// @public +export interface CreateKnowledgeBaseOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function createKnowledgeSource(context: SearchIndexContext, knowledgeSource: KnowledgeSourceUnion, options?: CreateKnowledgeSourceOptionalParams): Promise; + +// @public +export interface CreateKnowledgeSourceOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function createOrUpdateAlias(context: SearchIndexContext, alias: SearchAlias, aliasName: string, options?: CreateOrUpdateAliasOptionalParams): Promise; + +// @public +export interface CreateOrUpdateAliasOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function createOrUpdateIndex(context: SearchIndexContext, index: SearchIndex, indexName: string, options?: CreateOrUpdateIndexOptionalParams): Promise; + +// @public +export interface CreateOrUpdateIndexOptionalParams extends OperationOptions { + allowIndexDowntime?: boolean; + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; + querySourceAuthorization?: string; +} + +// @public +export function createOrUpdateKnowledgeBase(context: SearchIndexContext, knowledgeBase: KnowledgeBase, knowledgeBaseName: string, options?: CreateOrUpdateKnowledgeBaseOptionalParams): Promise; + +// @public +export interface CreateOrUpdateKnowledgeBaseOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function createOrUpdateKnowledgeSource(context: SearchIndexContext, knowledgeSource: KnowledgeSourceUnion, sourceName: string, options?: CreateOrUpdateKnowledgeSourceOptionalParams): Promise; + +// @public +export interface CreateOrUpdateKnowledgeSourceOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function createOrUpdateSynonymMap(context: SearchIndexContext, synonymMap: SynonymMap, synonymMapName: string, options?: CreateOrUpdateSynonymMapOptionalParams): Promise; + +// @public +export interface CreateOrUpdateSynonymMapOptionalParams extends OperationOptions { + clientRequestId?: 
string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public (undocumented) +export function createSearchIndex(endpointParam: string, credential: KeyCredential | TokenCredential, options?: SearchIndexClientOptionalParams): SearchIndexContext; + +// @public +export function createSynonymMap(context: SearchIndexContext, synonymMap: SynonymMap, options?: CreateSynonymMapOptionalParams): Promise; + +// @public +export interface CreateSynonymMapOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function deleteAlias(context: SearchIndexContext, aliasName: string, options?: DeleteAliasOptionalParams): Promise; + +// @public +export interface DeleteAliasOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function deleteIndex(context: SearchIndexContext, indexName: string, options?: DeleteIndexOptionalParams): Promise; + +// @public +export interface DeleteIndexOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; + querySourceAuthorization?: string; +} + +// @public +export function deleteKnowledgeBase(context: SearchIndexContext, knowledgeBaseName: string, options?: DeleteKnowledgeBaseOptionalParams): Promise; + +// @public +export interface DeleteKnowledgeBaseOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function deleteKnowledgeSource(context: SearchIndexContext, sourceName: string, options?: DeleteKnowledgeSourceOptionalParams): Promise; + +// @public +export interface DeleteKnowledgeSourceOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function deleteSynonymMap(context: SearchIndexContext, synonymMapName: string, options?: DeleteSynonymMapOptionalParams): Promise; + +// @public +export interface DeleteSynonymMapOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function getAlias(context: SearchIndexContext, aliasName: string, options?: GetAliasOptionalParams): Promise; + +// @public +export interface GetAliasOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function getIndex(context: SearchIndexContext, indexName: string, options?: GetIndexOptionalParams): Promise; + +// @public +export interface GetIndexOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public +export function getIndexStatistics(context: SearchIndexContext, indexName: string, options?: GetIndexStatisticsOptionalParams): Promise; + +// @public +export interface GetIndexStatisticsOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public +export function getKnowledgeBase(context: SearchIndexContext, knowledgeBaseName: string, options?: GetKnowledgeBaseOptionalParams): Promise; + +// @public +export interface GetKnowledgeBaseOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function getKnowledgeSource(context: SearchIndexContext, sourceName: string, options?: GetKnowledgeSourceOptionalParams): Promise; + +// @public +export interface GetKnowledgeSourceOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export 
function getServiceStatistics(context: SearchIndexContext, options?: GetServiceStatisticsOptionalParams): Promise; + +// @public +export interface GetServiceStatisticsOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function getSynonymMap(context: SearchIndexContext, synonymMapName: string, options?: GetSynonymMapOptionalParams): Promise; + +// @public +export interface GetSynonymMapOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function getSynonymMaps(context: SearchIndexContext, options?: GetSynonymMapsOptionalParams): Promise; + +// @public +export interface GetSynonymMapsOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export function listAliases(context: SearchIndexContext, options?: ListAliasesOptionalParams): PagedAsyncIterableIterator; + +// @public +export interface ListAliasesOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function listIndexes(context: SearchIndexContext, options?: ListIndexesOptionalParams): PagedAsyncIterableIterator; + +// @public +export interface ListIndexesOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export function listIndexStatsSummary(context: SearchIndexContext, options?: ListIndexStatsSummaryOptionalParams): PagedAsyncIterableIterator; + +// @public +export interface ListIndexStatsSummaryOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function listKnowledgeBases(context: SearchIndexContext, options?: ListKnowledgeBasesOptionalParams): PagedAsyncIterableIterator; + +// @public +export interface ListKnowledgeBasesOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function listKnowledgeSources(context: SearchIndexContext, options?: ListKnowledgeSourcesOptionalParams): PagedAsyncIterableIterator; + +// @public +export interface ListKnowledgeSourcesOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface SearchIndexClientOptionalParams extends ClientOptions { + apiVersion?: string; +} + +// @public (undocumented) +export interface SearchIndexContext extends Client { + apiVersion: string; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-searchIndex-node.api.md b/sdk/search/search-documents/review/search-documents-searchIndex-node.api.md new file mode 100644 index 000000000000..f11239c712e6 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-searchIndex-node.api.md @@ -0,0 +1,234 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). 
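The modular ("api") surface above threads a `SearchIndexContext` through standalone operation functions rather than a client class. A hedged sketch of that pattern, assuming a deep-import path for the modular entry points (not documented in the report) and abbreviating the `SearchIndex` shape, whose model type is defined elsewhere in the package:

```ts
import { DefaultAzureCredential } from "@azure/identity";
// Hypothetical deep-import path; the report does not document export locations.
import {
  createSearchIndex,
  createIndex,
  deleteIndex,
} from "@azure/search-documents/searchIndex/api";

async function main(): Promise<void> {
  // createSearchIndex returns the SearchIndexContext shared by all operations.
  const context = createSearchIndex(
    "https://<service>.search.windows.net",
    new DefaultAzureCredential(),
  );

  // Index definition abbreviated; the full SearchIndex shape is not part of
  // this report excerpt.
  await createIndex(context, {
    name: "hotels",
    fields: [
      { name: "hotelId", type: "Edm.String", key: true },
      { name: "hotelName", type: "Edm.String", searchable: true },
    ],
  });

  await deleteIndex(context, "hotels", { ifMatch: "*" });
}

main().catch(console.error);
```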
+ +```ts + +import { Client } from '@azure-rest/core-client'; +import { ClientOptions } from '@azure-rest/core-client'; +import { KeyCredential } from '@azure/core-auth'; +import { OperationOptions } from '@azure-rest/core-client'; +import { Pipeline } from '@azure/core-rest-pipeline'; +import { TokenCredential } from '@azure/core-auth'; + +// @public +export interface AnalyzeTextOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public +export interface CreateAliasOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface CreateIndexOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface CreateKnowledgeBaseOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface CreateKnowledgeSourceOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface CreateOrUpdateAliasOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface CreateOrUpdateIndexOptionalParams extends OperationOptions { + allowIndexDowntime?: boolean; + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; + querySourceAuthorization?: string; +} + +// @public +export interface CreateOrUpdateKnowledgeBaseOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface CreateOrUpdateKnowledgeSourceOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface CreateOrUpdateSynonymMapOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface CreateSynonymMapOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface DeleteAliasOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface DeleteIndexOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; + querySourceAuthorization?: string; +} + +// @public +export interface DeleteKnowledgeBaseOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface DeleteKnowledgeSourceOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface DeleteSynonymMapOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface GetAliasOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface GetIndexOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public +export interface GetIndexStatisticsOptionalParams extends OperationOptions { + clientRequestId?: string; + querySourceAuthorization?: string; +} + +// @public +export interface GetKnowledgeBaseOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface GetKnowledgeSourceOptionalParams extends OperationOptions { + clientRequestId?: string; +} 
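// NOTE (editorial annotation, not generated output): the recurring
// ifMatch/ifNoneMatch members on the option bags above implement ETag-based
// optimistic concurrency. A hedged sketch against the SearchIndexClient
// declared later in this file; the eTag property on the fetched index is an
// assumption, since the model types are not shown here:
//
//   const current = await client.getIndex("hotels");
//   await client.createOrUpdateIndex(current, "hotels", {
//     ifMatch: (current as { eTag?: string }).eTag, // fail if changed server-side
//   });
//
// ifMatch: "*" requires that the resource already exists, while
// ifNoneMatch: "*" lets the call succeed only when creating a new resource.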
+ +// @public +export interface GetServiceStatisticsOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface GetSynonymMapOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface GetSynonymMapsOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export interface ListAliasesOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface ListIndexesOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export interface ListIndexStatsSummaryOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface ListKnowledgeBasesOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface ListKnowledgeSourcesOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public (undocumented) +export class SearchIndexClient { + constructor(endpointParam: string, credential: KeyCredential | TokenCredential, options?: SearchIndexClientOptionalParams); + analyzeText(request: AnalyzeTextOptions, indexName: string, options?: AnalyzeTextOptionalParams): Promise; + createAlias(alias: SearchAlias, options?: CreateAliasOptionalParams): Promise; + createIndex(index: SearchIndex, options?: CreateIndexOptionalParams): Promise; + createKnowledgeBase(knowledgeBase: KnowledgeBase, options?: CreateKnowledgeBaseOptionalParams): Promise; + createKnowledgeSource(knowledgeSource: KnowledgeSourceUnion, options?: CreateKnowledgeSourceOptionalParams): Promise; + createOrUpdateAlias(alias: SearchAlias, aliasName: string, options?: CreateOrUpdateAliasOptionalParams): Promise; + createOrUpdateIndex(index: SearchIndex, indexName: string, options?: CreateOrUpdateIndexOptionalParams): Promise; + createOrUpdateKnowledgeBase(knowledgeBase: KnowledgeBase, knowledgeBaseName: string, options?: CreateOrUpdateKnowledgeBaseOptionalParams): Promise; + createOrUpdateKnowledgeSource(knowledgeSource: KnowledgeSourceUnion, sourceName: string, options?: CreateOrUpdateKnowledgeSourceOptionalParams): Promise; + createOrUpdateSynonymMap(synonymMap: SynonymMap, synonymMapName: string, options?: CreateOrUpdateSynonymMapOptionalParams): Promise; + createSynonymMap(synonymMap: SynonymMap, options?: CreateSynonymMapOptionalParams): Promise; + deleteAlias(aliasName: string, options?: DeleteAliasOptionalParams): Promise; + deleteIndex(indexName: string, options?: DeleteIndexOptionalParams): Promise; + deleteKnowledgeBase(knowledgeBaseName: string, options?: DeleteKnowledgeBaseOptionalParams): Promise; + deleteKnowledgeSource(sourceName: string, options?: DeleteKnowledgeSourceOptionalParams): Promise; + deleteSynonymMap(synonymMapName: string, options?: DeleteSynonymMapOptionalParams): Promise; + getAlias(aliasName: string, options?: GetAliasOptionalParams): Promise; + getIndex(indexName: string, options?: GetIndexOptionalParams): Promise; + getIndexStatistics(indexName: string, options?: GetIndexStatisticsOptionalParams): Promise; + getKnowledgeBase(knowledgeBaseName: string, options?: GetKnowledgeBaseOptionalParams): Promise; + getKnowledgeSource(sourceName: string, options?: GetKnowledgeSourceOptionalParams): Promise; + getServiceStatistics(options?: GetServiceStatisticsOptionalParams): Promise; + getSynonymMap(synonymMapName: string, options?: GetSynonymMapOptionalParams): Promise; + getSynonymMaps(options?: 
GetSynonymMapsOptionalParams): Promise; + listAliases(options?: ListAliasesOptionalParams): PagedAsyncIterableIterator; + listIndexes(options?: ListIndexesOptionalParams): PagedAsyncIterableIterator; + listIndexStatsSummary(options?: ListIndexStatsSummaryOptionalParams): PagedAsyncIterableIterator; + listKnowledgeBases(options?: ListKnowledgeBasesOptionalParams): PagedAsyncIterableIterator; + listKnowledgeSources(options?: ListKnowledgeSourcesOptionalParams): PagedAsyncIterableIterator; + readonly pipeline: Pipeline; +} + +// @public +export interface SearchIndexClientOptionalParams extends ClientOptions { + apiVersion?: string; +} + +// @public (undocumented) +export interface SearchIndexContext extends Client { + apiVersion: string; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-searchIndexer-api-node.api.md b/sdk/search/search-documents/review/search-documents-searchIndexer-api-node.api.md new file mode 100644 index 000000000000..8d1f6d91b6a0 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-searchIndexer-api-node.api.md @@ -0,0 +1,218 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). + +```ts + +import { Client } from '@azure-rest/core-client'; +import { ClientOptions } from '@azure-rest/core-client'; +import { KeyCredential } from '@azure/core-auth'; +import { OperationOptions } from '@azure-rest/core-client'; +import { TokenCredential } from '@azure/core-auth'; + +// @public +export function createDataSourceConnection(context: SearchIndexerContext, dataSource: SearchIndexerDataSourceConnection, options?: CreateDataSourceConnectionOptionalParams): Promise; + +// @public +export interface CreateDataSourceConnectionOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function createIndexer(context: SearchIndexerContext, indexer: SearchIndexer, options?: CreateIndexerOptionalParams): Promise; + +// @public +export interface CreateIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function createOrUpdateDataSourceConnection(context: SearchIndexerContext, dataSource: SearchIndexerDataSourceConnection, dataSourceName: string, options?: CreateOrUpdateDataSourceConnectionOptionalParams): Promise; + +// @public +export interface CreateOrUpdateDataSourceConnectionOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; + skipIndexerResetRequirementForCache?: boolean; +} + +// @public +export function createOrUpdateIndexer(context: SearchIndexerContext, indexer: SearchIndexer, indexerName: string, options?: CreateOrUpdateIndexerOptionalParams): Promise; + +// @public +export interface CreateOrUpdateIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; + disableCacheReprocessingChangeDetection?: boolean; + ifMatch?: string; + ifNoneMatch?: string; + skipIndexerResetRequirementForCache?: boolean; +} + +// @public +export function createOrUpdateSkillset(context: SearchIndexerContext, skillset: SearchIndexerSkillset, skillsetName: string, options?: CreateOrUpdateSkillsetOptionalParams): Promise; + +// @public +export interface CreateOrUpdateSkillsetOptionalParams extends OperationOptions { + clientRequestId?: string; + disableCacheReprocessingChangeDetection?: boolean; + ifMatch?: string; + ifNoneMatch?: string; + 
skipIndexerResetRequirementForCache?: boolean; +} + +// @public (undocumented) +export function createSearchIndexer(endpointParam: string, credential: KeyCredential | TokenCredential, options?: SearchIndexerClientOptionalParams): SearchIndexerContext; + +// @public +export function createSkillset(context: SearchIndexerContext, skillset: SearchIndexerSkillset, options?: CreateSkillsetOptionalParams): Promise; + +// @public +export interface CreateSkillsetOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function deleteDataSourceConnection(context: SearchIndexerContext, dataSourceName: string, options?: DeleteDataSourceConnectionOptionalParams): Promise; + +// @public +export interface DeleteDataSourceConnectionOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function deleteIndexer(context: SearchIndexerContext, indexerName: string, options?: DeleteIndexerOptionalParams): Promise; + +// @public +export interface DeleteIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function deleteSkillset(context: SearchIndexerContext, skillsetName: string, options?: DeleteSkillsetOptionalParams): Promise; + +// @public +export interface DeleteSkillsetOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export function getDataSourceConnection(context: SearchIndexerContext, dataSourceName: string, options?: GetDataSourceConnectionOptionalParams): Promise; + +// @public +export interface GetDataSourceConnectionOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function getDataSourceConnections(context: SearchIndexerContext, options?: GetDataSourceConnectionsOptionalParams): Promise; + +// @public +export interface GetDataSourceConnectionsOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export function getIndexer(context: SearchIndexerContext, indexerName: string, options?: GetIndexerOptionalParams): Promise; + +// @public +export interface GetIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function getIndexers(context: SearchIndexerContext, options?: GetIndexersOptionalParams): Promise; + +// @public +export interface GetIndexersOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export function getIndexerStatus(context: SearchIndexerContext, indexerName: string, options?: GetIndexerStatusOptionalParams): Promise; + +// @public +export interface GetIndexerStatusOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function getSkillset(context: SearchIndexerContext, skillsetName: string, options?: GetSkillsetOptionalParams): Promise; + +// @public +export interface GetSkillsetOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function getSkillsets(context: SearchIndexerContext, options?: GetSkillsetsOptionalParams): Promise; + +// @public +export interface GetSkillsetsOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export function resetDocuments(context: SearchIndexerContext, indexerName: string, options?: ResetDocumentsOptionalParams): Promise; + +// 
@public +export interface ResetDocumentsOptionalParams extends OperationOptions { + clientRequestId?: string; + keysOrIds?: DocumentKeysOrIds; + overwrite?: boolean; +} + +// @public +export function resetIndexer(context: SearchIndexerContext, indexerName: string, options?: ResetIndexerOptionalParams): Promise; + +// @public +export interface ResetIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function resetSkills(context: SearchIndexerContext, skillNames: SkillNames, skillsetName: string, options?: ResetSkillsOptionalParams): Promise; + +// @public +export interface ResetSkillsOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function resync(context: SearchIndexerContext, indexerName: string, options?: ResyncOptionalParams): Promise; + +// @public +export interface ResyncOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export function runIndexer(context: SearchIndexerContext, indexerName: string, options?: RunIndexerOptionalParams): Promise; + +// @public +export interface RunIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface SearchIndexerClientOptionalParams extends ClientOptions { + apiVersion?: string; +} + +// @public (undocumented) +export interface SearchIndexerContext extends Client { + apiVersion: string; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/review/search-documents-searchIndexer-node.api.md b/sdk/search/search-documents/review/search-documents-searchIndexer-node.api.md new file mode 100644 index 000000000000..565a468d68a5 --- /dev/null +++ b/sdk/search/search-documents/review/search-documents-searchIndexer-node.api.md @@ -0,0 +1,180 @@ +## API Report File for "@azure/search-documents" + +> Do not edit this file. It is a report generated by [API Extractor](https://api-extractor.com/). 
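Before the class-based report below, a usage sketch of the `SearchIndexerClient` surface it declares. The `SearchIndexer` model shape and the export path are assumptions, since neither appears in this excerpt:

```ts
import { DefaultAzureCredential } from "@azure/identity";
// Export path assumed; the report does not document where the class lives.
import { SearchIndexerClient } from "@azure/search-documents";

async function main(): Promise<void> {
  const client = new SearchIndexerClient(
    "https://<service>.search.windows.net",
    new DefaultAzureCredential(),
  );

  // Indexer definition abbreviated; it follows the service's indexer shape,
  // which is not part of this report excerpt.
  await client.createIndexer({
    name: "hotels-indexer",
    dataSourceName: "hotels-ds",
    targetIndexName: "hotels",
  });

  await client.runIndexer("hotels-indexer");
  console.log(await client.getIndexerStatus("hotels-indexer"));
}

main().catch(console.error);
```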
+ +```ts + +import { Client } from '@azure-rest/core-client'; +import { ClientOptions } from '@azure-rest/core-client'; +import { KeyCredential } from '@azure/core-auth'; +import { OperationOptions } from '@azure-rest/core-client'; +import { Pipeline } from '@azure/core-rest-pipeline'; +import { TokenCredential } from '@azure/core-auth'; + +// @public +export interface CreateDataSourceConnectionOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface CreateIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface CreateOrUpdateDataSourceConnectionOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; + skipIndexerResetRequirementForCache?: boolean; +} + +// @public +export interface CreateOrUpdateIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; + disableCacheReprocessingChangeDetection?: boolean; + ifMatch?: string; + ifNoneMatch?: string; + skipIndexerResetRequirementForCache?: boolean; +} + +// @public +export interface CreateOrUpdateSkillsetOptionalParams extends OperationOptions { + clientRequestId?: string; + disableCacheReprocessingChangeDetection?: boolean; + ifMatch?: string; + ifNoneMatch?: string; + skipIndexerResetRequirementForCache?: boolean; +} + +// @public +export interface CreateSkillsetOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface DeleteDataSourceConnectionOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface DeleteIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface DeleteSkillsetOptionalParams extends OperationOptions { + clientRequestId?: string; + ifMatch?: string; + ifNoneMatch?: string; +} + +// @public +export interface GetDataSourceConnectionOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface GetDataSourceConnectionsOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export interface GetIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface GetIndexersOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export interface GetIndexerStatusOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface GetSkillsetOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface GetSkillsetsOptionalParams extends OperationOptions { + clientRequestId?: string; + select?: string; +} + +// @public +export interface ResetDocumentsOptionalParams extends OperationOptions { + clientRequestId?: string; + keysOrIds?: DocumentKeysOrIds; + overwrite?: boolean; +} + +// @public +export interface ResetIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface ResetSkillsOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface ResyncOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public +export interface RunIndexerOptionalParams extends OperationOptions { + clientRequestId?: string; +} + +// @public (undocumented) +export 
class SearchIndexerClient { + constructor(endpointParam: string, credential: KeyCredential | TokenCredential, options?: SearchIndexerClientOptionalParams); + createDataSourceConnection(dataSource: SearchIndexerDataSourceConnection, options?: CreateDataSourceConnectionOptionalParams): Promise; + createIndexer(indexer: SearchIndexer, options?: CreateIndexerOptionalParams): Promise; + createOrUpdateDataSourceConnection(dataSource: SearchIndexerDataSourceConnection, dataSourceName: string, options?: CreateOrUpdateDataSourceConnectionOptionalParams): Promise; + createOrUpdateIndexer(indexer: SearchIndexer, indexerName: string, options?: CreateOrUpdateIndexerOptionalParams): Promise; + createOrUpdateSkillset(skillset: SearchIndexerSkillset, skillsetName: string, options?: CreateOrUpdateSkillsetOptionalParams): Promise; + createSkillset(skillset: SearchIndexerSkillset, options?: CreateSkillsetOptionalParams): Promise; + deleteDataSourceConnection(dataSourceName: string, options?: DeleteDataSourceConnectionOptionalParams): Promise; + deleteIndexer(indexerName: string, options?: DeleteIndexerOptionalParams): Promise; + deleteSkillset(skillsetName: string, options?: DeleteSkillsetOptionalParams): Promise; + getDataSourceConnection(dataSourceName: string, options?: GetDataSourceConnectionOptionalParams): Promise; + getDataSourceConnections(options?: GetDataSourceConnectionsOptionalParams): Promise; + getIndexer(indexerName: string, options?: GetIndexerOptionalParams): Promise; + getIndexers(options?: GetIndexersOptionalParams): Promise; + getIndexerStatus(indexerName: string, options?: GetIndexerStatusOptionalParams): Promise; + getSkillset(skillsetName: string, options?: GetSkillsetOptionalParams): Promise; + getSkillsets(options?: GetSkillsetsOptionalParams): Promise; + readonly pipeline: Pipeline; + resetDocuments(indexerName: string, options?: ResetDocumentsOptionalParams): Promise; + resetIndexer(indexerName: string, options?: ResetIndexerOptionalParams): Promise; + resetSkills(skillNames: SkillNames, skillsetName: string, options?: ResetSkillsOptionalParams): Promise; + resync(indexerName: string, options?: ResyncOptionalParams): Promise; + runIndexer(indexerName: string, options?: RunIndexerOptionalParams): Promise; +} + +// @public +export interface SearchIndexerClientOptionalParams extends ClientOptions { + apiVersion?: string; +} + +// @public (undocumented) +export interface SearchIndexerContext extends Client { + apiVersion: string; +} + +// (No @packageDocumentation comment for this package) + +``` diff --git a/sdk/search/search-documents/sample.env b/sdk/search/search-documents/sample.env index 4ca6754dc8c4..508439fc7d62 100644 --- a/sdk/search/search-documents/sample.env +++ b/sdk/search/search-documents/sample.env @@ -1,14 +1 @@ -# The endpoint of your Azure Search account. -ENDPOINT= - -# The endpoint for the OpenAI service. -AZURE_OPENAI_ENDPOINT= - -# The name of the OpenAI deployment you'd like your tests to use. -AZURE_OPENAI_CHAT_DEPLOYMENT_NAME= -# deployment that has 'text-embedding-ada-002' -AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME= - -# Our tests assume that TEST_MODE is "playback" by default. You can -# change it to "record" to generate new recordings, or "live" to bypass the recorder entirely. -# TEST_MODE=playback +# Feel free to add your own environment variables. 
\ No newline at end of file diff --git a/sdk/search/search-documents/src/generated/data/index.ts b/sdk/search/search-documents/src/generated/data/index.ts deleted file mode 100644 index 2bee12aaf341..000000000000 --- a/sdk/search/search-documents/src/generated/data/index.ts +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -export * from "./models/index.js"; -export { SearchClient } from "./searchClient.js"; -export * from "./operationsInterfaces/index.js"; diff --git a/sdk/search/search-documents/src/generated/data/models/index.ts b/sdk/search/search-documents/src/generated/data/models/index.ts deleted file mode 100644 index f4e638470ed1..000000000000 --- a/sdk/search/search-documents/src/generated/data/models/index.ts +++ /dev/null @@ -1,1460 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import * as coreClient from "@azure/core-client"; -import * as coreHttpCompat from "@azure/core-http-compat"; - -export type VectorQueryUnion = - | VectorQuery - | VectorizedQuery - | VectorizableTextQuery - | VectorizableImageUrlQuery - | VectorizableImageBinaryQuery; -export type VectorThresholdUnion = - | VectorThreshold - | VectorSimilarityThreshold - | SearchScoreThreshold; - -/** Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). */ -export interface ErrorResponse { - /** The error object. */ - error?: ErrorDetail; -} - -/** The error detail. */ -export interface ErrorDetail { - /** - * The error code. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly code?: string; - /** - * The error message. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly message?: string; - /** - * The error target. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly target?: string; - /** - * The error details. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly details?: ErrorDetail[]; - /** - * The error additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly additionalInfo?: ErrorAdditionalInfo[]; -} - -/** The resource management error additional info. */ -export interface ErrorAdditionalInfo { - /** - * The additional info type. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly type?: string; - /** - * The additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly info?: Record; -} - -/** Response containing search results from an index. */ -export interface SearchDocumentsResult { - /** - * The total count of results found by the search operation, or null if the count was not requested. If present, the count may be greater than the number of results in this response. This can happen if you use the $top or $skip parameters, or if the query can't return all the requested documents in a single response. 
- * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly count?: number; - /** - * A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not specified in the request. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly coverage?: number; - /** - * The facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not include any facet expressions. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly facets?: { [propertyName: string]: FacetResult[] }; - /** - * The answers query results for the search operation; null if the answers query parameter was not specified or set to 'none'. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly answers?: QueryAnswerResult[]; - /** - * Continuation JSON payload returned when the query can't return all the requested results in a single response. You can use this JSON along with @odata.nextLink to formulate another POST Search request to get the next part of the search response. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly nextPageParameters?: SearchRequest; - /** - * The sequence of results returned by the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly results: SearchResult[]; - /** - * Continuation URL returned when the query can't return all the requested results in a single response. You can use this URL to formulate another GET or POST Search request to get the next part of the search response. Make sure to use the same verb (GET or POST) as the request that produced this response. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly nextLink?: string; - /** - * Reason that a partial response was returned for a semantic ranking request. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly semanticPartialResponseReason?: SemanticErrorReason; - /** - * Type of partial response that was returned for a semantic ranking request. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly semanticPartialResponseType?: SemanticSearchResultsType; - /** - * Type of query rewrite that was used to retrieve documents. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly semanticQueryRewritesResultType?: SemanticQueryRewritesResultType; -} - -/** A single bucket of a facet query result. Reports the number of documents with a field value falling within a particular range or having a particular value or interval. */ -export interface FacetResult { - /** Describes unknown properties. The value of an unknown property can be of "any" type. */ - [property: string]: any; - /** - * The approximate count of documents falling within the bucket described by this facet. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly count?: number; - /** - * The resulting total avg for the facet when a avg metric is requested. - * NOTE: This property will not be serialized. It can only be populated by the server. 
- */ - readonly avg?: number; - /** - * The resulting total min for the facet when a min metric is requested. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly min?: number; - /** - * The resulting total max for the facet when a max metric is requested. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly max?: number; - /** - * The resulting total sum for the facet when a sum metric is requested. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly sum?: number; - /** - * The resulting total cardinality for the facet when a cardinality metric is requested. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly cardinality?: number; - /** - * The nested facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not contain any nested facets. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly facets?: { [propertyName: string]: FacetResult[] }; -} - -/** An answer is a text passage extracted from the contents of the most relevant documents that matched the query. Answers are extracted from the top search results. Answer candidates are scored and the top answers are selected. */ -export interface QueryAnswerResult { - /** Describes unknown properties. The value of an unknown property can be of "any" type. */ - [property: string]: any; - /** - * The score value represents how relevant the answer is to the query relative to other answers returned for the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly score: number; - /** - * The key of the document the answer was extracted from. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly key: string; - /** - * The text passage extracted from the document contents as the answer. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly text: string; - /** - * Same text passage as in the Text property with highlighted text phrases most relevant to the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly highlights?: string; -} - -/** Parameters for filtering, sorting, faceting, paging, and other search query behaviors. */ -export interface SearchRequest { - /** A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. */ - includeTotalResultCount?: boolean; - /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. */ - facets?: string[]; - /** The OData $filter expression to apply to the search query. */ - filter?: string; - /** The comma-separated list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */ - highlightFields?: string; - /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. */ - highlightPostTag?: string; - /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. 
*/ - highlightPreTag?: string; - /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */ - minimumCoverage?: number; - /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ - orderBy?: string; - /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */ - queryType?: QueryType; - /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. */ - scoringStatistics?: ScoringStatistics; - /** A value to be used to create a sticky session, which can help getting more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */ - sessionId?: string; - /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). */ - scoringParameters?: string[]; - /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ - scoringProfile?: string; - /** Enables a debugging tool that can be used to further explore your reranked results. */ - debug?: QueryDebugMode; - /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */ - searchText?: string; - /** The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ - searchFields?: string; - /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. */ - searchMode?: SearchMode; - /** A value that specifies the language of the search query. */ - queryLanguage?: QueryLanguage; - /** A value that specified the type of the speller to use to spell-correct individual search query terms. */ - speller?: QuerySpellerType; - /** The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. */ - select?: string; - /** The number of search results to skip. 
This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead. */ - skip?: number; - /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */ - top?: number; - /** The name of a semantic configuration that will be used when processing documents for queries of type semantic. */ - semanticConfigurationName?: string; - /** Allows the user to choose whether a semantic call should fail completely (default / current behavior), or to return partial results. */ - semanticErrorHandling?: SemanticErrorMode; - /** Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. */ - semanticMaxWaitInMilliseconds?: number; - /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */ - semanticQuery?: string; - /** A value that specifies whether answers should be returned as part of the search response. */ - answers?: QueryAnswerType; - /** A value that specifies whether captions should be returned as part of the search response. */ - captions?: QueryCaptionType; - /** A value that specifies whether query rewrites should be generated to augment the search query. */ - queryRewrites?: QueryRewritesType; - /** The comma-separated list of field names used for semantic ranking. */ - semanticFields?: string; - /** The query parameters for vector and hybrid search queries. */ - vectorQueries?: VectorQueryUnion[]; - /** Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter' for new indexes. */ - vectorFilterMode?: VectorFilterMode; - /** The query parameters to configure hybrid search behaviors. */ - hybridSearch?: HybridSearch; -} - -/** The query parameters for vector and hybrid search queries. */ -export interface VectorQuery { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "vector" | "text" | "imageUrl" | "imageBinary"; - /** Number of nearest neighbors to return as top hits. */ - kNearestNeighborsCount?: number; - /** Vector Fields of type Collection(Edm.Single) to be included in the vector searched. */ - fields?: string; - /** When true, triggers an exhaustive k-nearest neighbor search across all vectors within the vector index. Useful for scenarios where exact matches are critical, such as determining ground truth values. */ - exhaustive?: boolean; - /** Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter configured in the index definition. It can be set only when 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method is used on the underlying vector field. */ - oversampling?: number; - /** Relative weight of the vector query when compared to other vector query and/or the text query within the same search request. 
This value is used when combining the results of multiple ranking lists produced by the different vector queries and/or the results retrieved through the text query. The higher the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. */ - weight?: number; - /** The threshold used for vector queries. Note this can only be set if all 'fields' use the same similarity metric. */ - threshold?: VectorThresholdUnion; - /** The OData filter expression to apply to this specific vector query. If no filter expression is defined at the vector level, the expression defined in the top level filter parameter is used instead. */ - filterOverride?: string; - /** Controls how many vectors can be matched from each document in a vector search query. Setting it to 1 ensures at most one vector per document is matched, guaranteeing results come from distinct documents. Setting it to 0 (unlimited) allows multiple relevant vectors from the same document to be matched. Default is 0. */ - perDocumentVectorLimit?: number; -} - -/** The threshold used for vector queries. */ -export interface VectorThreshold { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "vectorSimilarity" | "searchScore"; -} - -/** The query parameters to configure hybrid search behaviors. */ -export interface HybridSearch { - /** Determines the maximum number of documents to be retrieved by the text query portion of a hybrid search request. Those documents will be combined with the documents matching the vector queries to produce a single final list of results. Choosing a larger maxTextRecallSize value will allow retrieving and paging through more documents (using the top and skip parameters), at the cost of higher resource utilization and higher latency. The value needs to be between 1 and 10,000. Default is 1000. */ - maxTextRecallSize?: number; - /** Determines whether the count and facets should include all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. */ - countAndFacetMode?: HybridCountAndFacetMode; -} - -/** Contains a document found by a search query, plus associated metadata. */ -export interface SearchResult { - /** Describes unknown properties. The value of an unknown property can be of "any" type. */ - [property: string]: any; - /** - * The relevance score of the document compared to other documents returned by the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly _score: number; - /** - * The relevance score computed by the semantic ranker for the top search results. Search results are sorted by the RerankerScore first and then by the Score. RerankerScore is only returned for queries of type 'semantic'. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly _rerankerScore?: number; - /** - * The relevance score computed by boosting the Reranker Score. Search results are sorted by the RerankerScore/RerankerBoostedScore based on useScoringProfileBoostedRanking in the Semantic Config. RerankerBoostedScore is only returned for queries of type 'semantic' - * NOTE: This property will not be serialized. It can only be populated by the server. 
- */ - readonly _rerankerBoostedScore?: number; - /** - * Text fragments from the document that indicate the matching search terms, organized by each applicable field; null if hit highlighting was not enabled for the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly _highlights?: { [propertyName: string]: string[] }; - /** - * Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type 'semantic'. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly _captions?: QueryCaptionResult[]; - /** - * Contains debugging information that can be used to further explore your search results. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly _documentDebugInfo?: DocumentDebugInfo; -} - -/** Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type `semantic`. */ -export interface QueryCaptionResult { - /** Describes unknown properties. The value of an unknown property can be of "any" type. */ - [property: string]: any; - /** - * A representative text passage extracted from the document most relevant to the search query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly text?: string; - /** - * Same text passage as in the Text property with highlighted phrases most relevant to the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly highlights?: string; -} - -/** Contains debugging information that can be used to further explore your search results. */ -export interface DocumentDebugInfo { - /** - * Contains debugging information specific to semantic ranking requests. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly semantic?: SemanticDebugInfo; - /** - * Contains debugging information specific to vector and hybrid search. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly vectors?: VectorsDebugInfo; - /** - * Contains debugging information specific to vectors matched within a collection of complex types. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly innerHits?: { - [propertyName: string]: QueryResultDocumentInnerHit[]; - }; -} - -export interface SemanticDebugInfo { - /** - * The title field that was sent to the semantic enrichment process, as well as how it was used - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly titleField?: QueryResultDocumentSemanticField; - /** - * The content fields that were sent to the semantic enrichment process, as well as how they were used - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly contentFields?: QueryResultDocumentSemanticField[]; - /** - * The keyword fields that were sent to the semantic enrichment process, as well as how they were used - * NOTE: This property will not be serialized. It can only be populated by the server. 
- */ - readonly keywordFields?: QueryResultDocumentSemanticField[]; - /** - * The raw concatenated strings that were sent to the semantic enrichment process. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly rerankerInput?: QueryResultDocumentRerankerInput; -} - -/** Description of fields that were sent to the semantic enrichment process, as well as how they were used */ -export interface QueryResultDocumentSemanticField { - /** - * The name of the field that was sent to the semantic enrichment process - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly name?: string; - /** - * The way the field was used for the semantic enrichment process (fully used, partially used, or unused) - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly state?: SemanticFieldState; -} - -/** The raw concatenated strings that were sent to the semantic enrichment process. */ -export interface QueryResultDocumentRerankerInput { - /** - * The raw string for the title field that was used for semantic enrichment. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly title?: string; - /** - * The raw concatenated strings for the content fields that were used for semantic enrichment. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly content?: string; - /** - * The raw concatenated strings for the keyword fields that were used for semantic enrichment. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly keywords?: string; -} - -export interface VectorsDebugInfo { - /** - * The breakdown of subscores of the document prior to the chosen result set fusion/combination method such as RRF. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly subscores?: QueryResultDocumentSubscores; -} - -/** The breakdown of subscores between the text and vector query components of the search query for this document. Each vector query is shown as a separate object in the same order they were received. */ -export interface QueryResultDocumentSubscores { - /** - * The BM25 or Classic score for the text portion of the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly text?: TextResult; - /** - * The vector similarity and @search.score values for each vector query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly vectors?: { [propertyName: string]: SingleVectorFieldResult }[]; - /** - * The BM25 or Classic score for the text portion of the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly documentBoost?: number; -} - -/** The BM25 or Classic score for the text portion of the query. */ -export interface TextResult { - /** - * The BM25 or Classic score for the text portion of the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly searchScore?: number; -} - -/** A single vector field result. Both @search.score and vector similarity values are returned. Vector similarity is related to @search.score by an equation. */ -export interface SingleVectorFieldResult { - /** - * The @search.score value that is calculated from the vector similarity score. 
This is the score that's visible in a pure single-field single-vector query.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly searchScore?: number;
- /**
- * The vector similarity score for this document. Note this is the canonical definition of similarity metric, not the 'distance' version. For example, cosine similarity instead of cosine distance.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly vectorSimilarity?: number;
-}
-
-/** Detailed scoring information for an individual element of a complex collection. */
-export interface QueryResultDocumentInnerHit {
- /**
- * Position of this specific matching element within its original collection. Position starts at 0.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly ordinal?: number;
- /**
- * Detailed scoring information for an individual element of a complex collection that matched a vector query.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly vectors?: { [propertyName: string]: SingleVectorFieldResult }[];
-}
-
-/** Response containing suggestion query results from an index. */
-export interface SuggestDocumentsResult {
- /**
- * The sequence of results returned by the query.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly results: SuggestResult[];
- /**
- * A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not set in the request.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly coverage?: number;
-}
-
-/** A result containing a document found by a suggestion query, plus associated metadata. */
-export interface SuggestResult {
- /** Describes unknown properties. The value of an unknown property can be of "any" type. */
- [property: string]: any;
- /**
- * The text of the suggestion result.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly _text: string;
-}
-
-/** Parameters for filtering, sorting, fuzzy matching, and other suggestions query behaviors. */
-export interface SuggestRequest {
- /** An OData expression that filters the documents considered for suggestions. */
- filter?: string;
- /** A value indicating whether to use fuzzy matching for the suggestion query. Default is false. When set to true, the query will find suggestions even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and consume more resources. */
- useFuzzyMatching?: boolean;
- /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. */
- highlightPostTag?: string;
- /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. */
- highlightPreTag?: string;
- /** A number between 0 and 100 indicating the percentage of the index that must be covered by a suggestion query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80.
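
A short sketch of how the per-field vector results above (`SingleVectorFieldResult` keyed by field name, as in `QueryResultDocumentInnerHit.vectors`) could be scanned; the shapes are re-declared locally and the sample data is invented:

```ts
// Local stand-in for SingleVectorFieldResult, keyed by vector field name.
interface VectorFieldResult {
  searchScore?: number;
  vectorSimilarity?: number;
}
type VectorsByField = { [field: string]: VectorFieldResult }[];

// Return the best vector similarity seen across all fields, or undefined.
function bestSimilarity(vectors: VectorsByField): number | undefined {
  const scores = vectors
    .flatMap((byField) => Object.values(byField))
    .map((r) => r.vectorSimilarity)
    .filter((s): s is number => s !== undefined);
  return scores.length ? Math.max(...scores) : undefined;
}

// "contentVector" is a hypothetical vector field name.
console.log(bestSimilarity([{ contentVector: { vectorSimilarity: 0.82 } }]));
```
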
*/
- minimumCoverage?: number;
- /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */
- orderBy?: string;
- /** The search text to use to suggest documents. Must be at least 1 character, and no more than 100 characters. */
- searchText: string;
- /** The comma-separated list of field names to search for the specified search text. Target fields must be included in the specified suggester. */
- searchFields?: string;
- /** The comma-separated list of fields to retrieve. If unspecified, only the key field will be included in the results. */
- select?: string;
- /** The name of the suggester as specified in the suggesters collection that's part of the index definition. */
- suggesterName: string;
- /** The number of suggestions to retrieve. This must be a value between 1 and 100. The default is 5. */
- top?: number;
-}
-
-/** Contains a batch of document write actions to send to the index. */
-export interface IndexBatch {
- /** The actions in the batch. */
- actions: IndexAction[];
-}
-
-/** Represents an index action that operates on a document. */
-export interface IndexAction {
- /** Describes unknown properties. The value of an unknown property can be of "any" type. */
- [property: string]: any;
- /** The operation to perform on a document in an indexing batch. */
- __actionType: IndexActionType;
-}
-
-/** Response containing the status of operations for all documents in the indexing request. */
-export interface IndexDocumentsResult {
- /**
- * The list of status information for each document in the indexing request.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly results: IndexingResult[];
-}
-
-/** Status of an indexing operation for a single document. */
-export interface IndexingResult {
- /**
- * The key of a document that was in the indexing request.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly key: string;
- /**
- * The error message explaining why the indexing operation failed for the document identified by the key; null if indexing succeeded.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly errorMessage?: string;
- /**
- * A value indicating whether the indexing operation succeeded for the document identified by the key.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly succeeded: boolean;
- /**
- * The status code of the indexing operation. Possible values include: 200 for a successful update or delete, 201 for successful document creation, 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 when the service is too busy.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly statusCode: number;
-}
-
-/** The result of an Autocomplete query.
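
A hedged sketch of the batch and result shapes above: each action carries document fields alongside the `__actionType` discriminator, and per-document status must be checked even when the overall request succeeds. The document key `hotelId` and its values are hypothetical:

```ts
// A batch matching the IndexBatch / IndexAction shapes above.
const batch = {
  actions: [
    { __actionType: "mergeOrUpload" as const, hotelId: "1", rating: 4.5 },
    { __actionType: "delete" as const, hotelId: "2" },
  ],
};

// Per IndexingResult above, each key reports success independently
// (200/201 on success; 400, 404, 409, 422, or 503 on failure).
function failedKeys(results: { key: string; succeeded: boolean }[]): string[] {
  return results.filter((r) => !r.succeeded).map((r) => r.key);
}

console.log(batch.actions.length, failedKeys([{ key: "1", succeeded: true }]));
```
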
*/ -export interface AutocompleteResult { - /** - * A value indicating the percentage of the index that was considered by the autocomplete request, or null if minimumCoverage was not specified in the request. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly coverage?: number; - /** - * The list of returned Autocompleted items. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly results: AutocompleteItem[]; -} - -/** The result of Autocomplete requests. */ -export interface AutocompleteItem { - /** - * The completed term. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly text: string; - /** - * The query along with the completed term. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly queryPlusText: string; -} - -/** Parameters for fuzzy matching, and other autocomplete query behaviors. */ -export interface AutocompleteRequest { - /** The search text on which to base autocomplete results. */ - searchText: string; - /** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. */ - autocompleteMode?: AutocompleteMode; - /** An OData expression that filters the documents used to produce completed terms for the Autocomplete result. */ - filter?: string; - /** A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will autocomplete terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and consume more resources. */ - useFuzzyMatching?: boolean; - /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. */ - highlightPostTag?: string; - /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. */ - highlightPreTag?: string; - /** A number between 0 and 100 indicating the percentage of the index that must be covered by an autocomplete query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ - minimumCoverage?: number; - /** The comma-separated list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. */ - searchFields?: string; - /** The name of the suggester as specified in the suggesters collection that's part of the index definition. */ - suggesterName: string; - /** The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. */ - top?: number; -} - -/** Contains debugging information that can be used to further explore your search results. */ -export interface DebugInfo { - /** - * Contains debugging information specific to query rewrites. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly queryRewrites?: QueryRewritesDebugInfo; -} - -/** Contains debugging information specific to query rewrites. */ -export interface QueryRewritesDebugInfo { - /** - * List of query rewrites generated for the text query. 
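
A minimal sketch of an autocomplete round trip using the request and result shapes above; `sg` is a hypothetical suggester name and the result items are invented:

```ts
// Request literal matching the AutocompleteRequest shape above.
const autocompleteRequest = {
  searchText: "sea",
  suggesterName: "sg",          // hypothetical suggester from the index
  autocompleteMode: "twoTerms", // shingles, per the comment above
  top: 5,
};

// AutocompleteItem.queryPlusText already includes the query prefix.
function completions(items: { text: string; queryPlusText: string }[]): string[] {
  return items.map((i) => i.queryPlusText);
}

console.log(
  autocompleteRequest.searchText,
  completions([{ text: "seaside", queryPlusText: "seaside hotel" }]),
);
```
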
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly text?: QueryRewritesValuesDebugInfo;
- /**
- * List of query rewrites generated for the vectorizable text queries.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly vectors?: QueryRewritesValuesDebugInfo[];
-}
-
-/** Contains debugging information specific to query rewrites. */
-export interface QueryRewritesValuesDebugInfo {
- /**
- * The input text to the generative query rewriting model. There may be cases where the user query and the input to the generative model are not identical.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly inputQuery?: string;
- /**
- * List of query rewrites.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly rewrites?: string[];
-}
-
-/** The query parameters to use for vector search when a raw vector value is provided. */
-export interface VectorizedQuery extends VectorQuery {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "vector";
- /** The vector representation of a search query. */
- vector: number[];
-}
-
-/** The query parameters to use for vector search when a text value that needs to be vectorized is provided. */
-export interface VectorizableTextQuery extends VectorQuery {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "text";
- /** The text to be vectorized to perform a vector search query. */
- text: string;
- /** Can be configured to let a generative model rewrite the query before sending it to be vectorized. */
- queryRewrites?: QueryRewritesType;
-}
-
-/** The query parameters to use for vector search when a URL that represents an image value that needs to be vectorized is provided. */
-export interface VectorizableImageUrlQuery extends VectorQuery {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "imageUrl";
- /** The URL of an image to be vectorized to perform a vector search query. */
- url?: string;
-}
-
-/** The query parameters to use for vector search when a base64-encoded binary of an image that needs to be vectorized is provided. */
-export interface VectorizableImageBinaryQuery extends VectorQuery {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "imageBinary";
- /** The base64-encoded binary of an image to be vectorized to perform a vector search query. */
- base64Image?: string;
-}
-
-/** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
-export interface VectorSimilarityThreshold extends VectorThreshold {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "vectorSimilarity";
- /** The threshold will filter based on the similarity metric value. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
- value: number;
-}
-
-/** The results of the vector query will be filtered based on the '@search.score' value.
Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */ -export interface SearchScoreThreshold extends VectorThreshold { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "searchScore"; - /** The threshold will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */ - value: number; -} - -/** Parameter group */ -export interface SearchOptions { - /** A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. */ - includeTotalResultCount?: boolean; - /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. */ - facets?: string[]; - /** The OData $filter expression to apply to the search query. */ - filter?: string; - /** The list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */ - highlightFields?: string[]; - /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. */ - highlightPostTag?: string; - /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. */ - highlightPreTag?: string; - /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */ - minimumCoverage?: number; - /** The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, and desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no OrderBy is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ - orderBy?: string[]; - /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */ - queryType?: QueryType; - /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). */ - scoringParameters?: string[]; - /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ - scoringProfile?: string; - /** The list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ - searchFields?: string[]; - /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. 
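
The four `kind` discriminators above form a tagged union that TypeScript can narrow exhaustively. A self-contained sketch, re-declaring the union locally (without the `VectorQuery` base, which is defined elsewhere in this file):

```ts
// Local union mirroring the kind discriminators above.
type VectorQuerySketch =
  | { kind: "vector"; vector: number[] }
  | { kind: "text"; text: string }
  | { kind: "imageUrl"; url?: string }
  | { kind: "imageBinary"; base64Image?: string };

function describe(q: VectorQuerySketch): string {
  switch (q.kind) {
    case "vector":
      return `raw vector of length ${q.vector.length}`;
    case "text":
      return `text to vectorize: ${q.text}`;
    case "imageUrl":
      return `image at ${q.url ?? "<no url>"}`;
    case "imageBinary":
      return "base64-encoded image";
  }
}

console.log(describe({ kind: "vector", vector: [0.12, -0.07, 0.33] }));
```
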
*/ - searchMode?: SearchMode; - /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. */ - scoringStatistics?: ScoringStatistics; - /** A value to be used to create a sticky session, which can help to get more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */ - sessionId?: string; - /** The list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. */ - select?: string[]; - /** The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use $skip due to this limitation, consider using $orderby on a totally-ordered key and $filter with a range query instead. */ - skip?: number; - /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */ - top?: number; - /** The name of the semantic configuration that lists which fields should be used for semantic ranking, captions, highlights, and answers */ - semanticConfiguration?: string; - /** Allows the user to choose whether a semantic call should fail completely, or to return partial results (default). */ - semanticErrorHandling?: SemanticErrorMode; - /** Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. */ - semanticMaxWaitInMilliseconds?: number; - /** This parameter is only valid if the query type is `semantic`. If set, the query returns answers extracted from key passages in the highest ranked documents. The number of answers returned can be configured by appending the pipe character `|` followed by the `count-` option after the answers parameter value, such as `extractive|count-3`. Default count is 1. The confidence threshold can be configured by appending the pipe character `|` followed by the `threshold-` option after the answers parameter value, such as `extractive|threshold-0.9`. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe character '|' followed by the 'count-', such as 'extractive|maxcharlength-600'. */ - answers?: QueryAnswerType; - /** This parameter is only valid if the query type is `semantic`. If set, the query returns captions extracted from key passages in the highest ranked documents. When Captions is set to `extractive`, highlighting is enabled by default, and can be configured by appending the pipe character `|` followed by the `highlight-` option, such as `extractive|highlight-true`. Defaults to `None`. The maximum character length of captions can be configured by appending the pipe character '|' followed by the 'count-', such as 'extractive|maxcharlength-600'. */ - captions?: QueryCaptionType; - /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. 
This is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */
- semanticQuery?: string;
- /** When QueryRewrites is set to `generative`, the query terms are sent to a generative model which will produce 10 (default) rewrites to help increase the recall of the request. The requested count can be configured by appending the pipe character `|` followed by the `count-` option, such as `generative|count-3`. Defaults to `None`. This parameter is only valid if the query type is `semantic`. */
- queryRewrites?: QueryRewritesType;
- /** Enables a debugging tool that can be used to further explore your search results. */
- debug?: QueryDebugMode;
- /** The language of the query. */
- queryLanguage?: QueryLanguage;
- /** Improve search recall by spell-correcting individual search query terms. */
- speller?: QuerySpellerType;
- /** The list of field names used for semantic ranking. */
- semanticFields?: string[];
-}
-
-/** Parameter group */
-export interface SuggestOptions {
- /** An OData expression that filters the documents considered for suggestions. */
- filter?: string;
- /** A value indicating whether to use fuzzy matching for the suggestion query. Default is false. When set to true, the query will find terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy suggestion queries are slower and consume more resources. */
- useFuzzyMatching?: boolean;
- /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. */
- highlightPostTag?: string;
- /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. */
- highlightPreTag?: string;
- /** A number between 0 and 100 indicating the percentage of the index that must be covered by a suggestion query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */
- minimumCoverage?: number;
- /** The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */
- orderBy?: string[];
- /** The list of field names to search for the specified search text. Target fields must be included in the specified suggester. */
- searchFields?: string[];
- /** The list of fields to retrieve. If unspecified, only the key field will be included in the results. */
- select?: string[];
- /** The number of suggestions to retrieve. The value must be a number between 1 and 100. The default is 5. */
- top?: number;
-}
-
-/** Parameter group */
-export interface AutocompleteOptions {
- /** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms.
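
A sketch of the semantic knobs documented in `SearchOptions` above; the pipe-delimited `answers`/`captions` values follow the examples in those comments, and `my-semantic-config` is a hypothetical configuration name:

```ts
// Options literal exercising the semantic fields of SearchOptions above.
const searchOptions = {
  queryType: "semantic",
  semanticConfiguration: "my-semantic-config", // hypothetical name
  answers: "extractive|count-3",               // up to 3 extracted answers
  captions: "extractive|highlight-true",       // captions with highlighting
  semanticErrorHandling: "partial",            // fall back to base results
  semanticMaxWaitInMilliseconds: 1000,
  top: 10,
};

console.log(JSON.stringify(searchOptions));
```
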
*/ - autocompleteMode?: AutocompleteMode; - /** An OData expression that filters the documents used to produce completed terms for the Autocomplete result. */ - filter?: string; - /** A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will find terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and consume more resources. */ - useFuzzyMatching?: boolean; - /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. */ - highlightPostTag?: string; - /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. */ - highlightPreTag?: string; - /** A number between 0 and 100 indicating the percentage of the index that must be covered by an autocomplete query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ - minimumCoverage?: number; - /** The list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. */ - searchFields?: string[]; - /** The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. */ - top?: number; -} - -/** Known values of {@link ApiVersion20251101Preview} that the service accepts. */ -export enum KnownApiVersion20251101Preview { - /** Api Version '2025-11-01-preview' */ - TwoThousandTwentyFive1101Preview = "2025-11-01-preview", -} - -/** - * Defines values for ApiVersion20251101Preview. \ - * {@link KnownApiVersion20251101Preview} can be used interchangeably with ApiVersion20251101Preview, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **2025-11-01-preview**: Api Version '2025-11-01-preview' - */ -export type ApiVersion20251101Preview = string; - -/** Known values of {@link SemanticErrorMode} that the service accepts. */ -export enum KnownSemanticErrorMode { - /** If the semantic processing fails, partial results still return. The definition of partial results depends on what semantic step failed and what was the reason for failure. */ - Partial = "partial", - /** If there is an exception during the semantic processing step, the query will fail and return the appropriate HTTP code depending on the error. */ - Fail = "fail", -} - -/** - * Defines values for SemanticErrorMode. \ - * {@link KnownSemanticErrorMode} can be used interchangeably with SemanticErrorMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **partial**: If the semantic processing fails, partial results still return. The definition of partial results depends on what semantic step failed and what was the reason for failure. \ - * **fail**: If there is an exception during the semantic processing step, the query will fail and return the appropriate HTTP code depending on the error. - */ -export type SemanticErrorMode = string; - -/** Known values of {@link QueryAnswerType} that the service accepts. */ -export enum KnownQueryAnswerType { - /** Do not return answers for the query. 
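
Each `Known*` enum above is sugar over an extensible `string` alias, so unknown service values still type-check. A self-contained sketch with local stand-ins mirroring `KnownSemanticErrorMode` / `SemanticErrorMode`:

```ts
// Local stand-ins for the extensible-enum pattern above.
enum KnownMode {
  Partial = "partial",
  Fail = "fail",
}
type Mode = string; // mirrors `export type SemanticErrorMode = string`

const known: Mode = KnownMode.Partial; // known value via the enum
const future: Mode = "some-new-mode";  // future service values still compile

console.log(known, future);
```
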
*/ - None = "none", - /** Extracts answer candidates from the contents of the documents returned in response to a query expressed as a question in natural language. */ - Extractive = "extractive", -} - -/** - * Defines values for QueryAnswerType. \ - * {@link KnownQueryAnswerType} can be used interchangeably with QueryAnswerType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **none**: Do not return answers for the query. \ - * **extractive**: Extracts answer candidates from the contents of the documents returned in response to a query expressed as a question in natural language. - */ -export type QueryAnswerType = string; - -/** Known values of {@link QueryCaptionType} that the service accepts. */ -export enum KnownQueryCaptionType { - /** Do not return captions for the query. */ - None = "none", - /** Extracts captions from the matching documents that contain passages relevant to the search query. */ - Extractive = "extractive", -} - -/** - * Defines values for QueryCaptionType. \ - * {@link KnownQueryCaptionType} can be used interchangeably with QueryCaptionType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **none**: Do not return captions for the query. \ - * **extractive**: Extracts captions from the matching documents that contain passages relevant to the search query. - */ -export type QueryCaptionType = string; - -/** Known values of {@link QueryRewritesType} that the service accepts. */ -export enum KnownQueryRewritesType { - /** Do not generate additional query rewrites for this query. */ - None = "none", - /** Generate alternative query terms to increase the recall of a search request. */ - Generative = "generative", -} - -/** - * Defines values for QueryRewritesType. \ - * {@link KnownQueryRewritesType} can be used interchangeably with QueryRewritesType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **none**: Do not generate additional query rewrites for this query. \ - * **generative**: Generate alternative query terms to increase the recall of a search request. - */ -export type QueryRewritesType = string; - -/** Known values of {@link QueryDebugMode} that the service accepts. */ -export enum KnownQueryDebugMode { - /** No query debugging information will be returned. */ - Disabled = "disabled", - /** Allows the user to further explore their reranked results. */ - Semantic = "semantic", - /** Allows the user to further explore their hybrid and vector query results. */ - Vector = "vector", - /** Allows the user to explore the list of query rewrites generated for their search request. */ - QueryRewrites = "queryRewrites", - /** Allows the user to retrieve scoring information regarding vectors matched within a collection of complex types. */ - InnerHits = "innerHits", - /** Turn on all debug options. */ - All = "all", -} - -/** - * Defines values for QueryDebugMode. \ - * {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **disabled**: No query debugging information will be returned. \ - * **semantic**: Allows the user to further explore their reranked results. \ - * **vector**: Allows the user to further explore their hybrid and vector query results. 
\ - * **queryRewrites**: Allows the user to explore the list of query rewrites generated for their search request. \ - * **innerHits**: Allows the user to retrieve scoring information regarding vectors matched within a collection of complex types. \ - * **all**: Turn on all debug options. - */ -export type QueryDebugMode = string; - -/** Known values of {@link QueryLanguage} that the service accepts. */ -export enum KnownQueryLanguage { - /** Query language not specified. */ - None = "none", - /** Query language value for English (United States). */ - EnUs = "en-us", - /** Query language value for English (Great Britain). */ - EnGb = "en-gb", - /** Query language value for English (India). */ - EnIn = "en-in", - /** Query language value for English (Canada). */ - EnCa = "en-ca", - /** Query language value for English (Australia). */ - EnAu = "en-au", - /** Query language value for French (France). */ - FrFr = "fr-fr", - /** Query language value for French (Canada). */ - FrCa = "fr-ca", - /** Query language value for German (Germany). */ - DeDe = "de-de", - /** Query language value for Spanish (Spain). */ - EsEs = "es-es", - /** Query language value for Spanish (Mexico). */ - EsMx = "es-mx", - /** Query language value for Chinese (China). */ - ZhCn = "zh-cn", - /** Query language value for Chinese (Taiwan). */ - ZhTw = "zh-tw", - /** Query language value for Portuguese (Brazil). */ - PtBr = "pt-br", - /** Query language value for Portuguese (Portugal). */ - PtPt = "pt-pt", - /** Query language value for Italian (Italy). */ - ItIt = "it-it", - /** Query language value for Japanese (Japan). */ - JaJp = "ja-jp", - /** Query language value for Korean (Korea). */ - KoKr = "ko-kr", - /** Query language value for Russian (Russia). */ - RuRu = "ru-ru", - /** Query language value for Czech (Czech Republic). */ - CsCz = "cs-cz", - /** Query language value for Dutch (Belgium). */ - NlBe = "nl-be", - /** Query language value for Dutch (Netherlands). */ - NlNl = "nl-nl", - /** Query language value for Hungarian (Hungary). */ - HuHu = "hu-hu", - /** Query language value for Polish (Poland). */ - PlPl = "pl-pl", - /** Query language value for Swedish (Sweden). */ - SvSe = "sv-se", - /** Query language value for Turkish (Turkey). */ - TrTr = "tr-tr", - /** Query language value for Hindi (India). */ - HiIn = "hi-in", - /** Query language value for Arabic (Saudi Arabia). */ - ArSa = "ar-sa", - /** Query language value for Arabic (Egypt). */ - ArEg = "ar-eg", - /** Query language value for Arabic (Morocco). */ - ArMa = "ar-ma", - /** Query language value for Arabic (Kuwait). */ - ArKw = "ar-kw", - /** Query language value for Arabic (Jordan). */ - ArJo = "ar-jo", - /** Query language value for Danish (Denmark). */ - DaDk = "da-dk", - /** Query language value for Norwegian (Norway). */ - NoNo = "no-no", - /** Query language value for Bulgarian (Bulgaria). */ - BgBg = "bg-bg", - /** Query language value for Croatian (Croatia). */ - HrHr = "hr-hr", - /** Query language value for Croatian (Bosnia and Herzegovina). */ - HrBa = "hr-ba", - /** Query language value for Malay (Malaysia). */ - MsMy = "ms-my", - /** Query language value for Malay (Brunei Darussalam). */ - MsBn = "ms-bn", - /** Query language value for Slovenian (Slovenia). */ - SlSl = "sl-sl", - /** Query language value for Tamil (India). */ - TaIn = "ta-in", - /** Query language value for Vietnamese (Viet Nam). */ - ViVn = "vi-vn", - /** Query language value for Greek (Greece). */ - ElGr = "el-gr", - /** Query language value for Romanian (Romania). 
*/ - RoRo = "ro-ro", - /** Query language value for Icelandic (Iceland). */ - IsIs = "is-is", - /** Query language value for Indonesian (Indonesia). */ - IdId = "id-id", - /** Query language value for Thai (Thailand). */ - ThTh = "th-th", - /** Query language value for Lithuanian (Lithuania). */ - LtLt = "lt-lt", - /** Query language value for Ukrainian (Ukraine). */ - UkUa = "uk-ua", - /** Query language value for Latvian (Latvia). */ - LvLv = "lv-lv", - /** Query language value for Estonian (Estonia). */ - EtEe = "et-ee", - /** Query language value for Catalan. */ - CaEs = "ca-es", - /** Query language value for Finnish (Finland). */ - FiFi = "fi-fi", - /** Query language value for Serbian (Bosnia and Herzegovina). */ - SrBa = "sr-ba", - /** Query language value for Serbian (Montenegro). */ - SrMe = "sr-me", - /** Query language value for Serbian (Serbia). */ - SrRs = "sr-rs", - /** Query language value for Slovak (Slovakia). */ - SkSk = "sk-sk", - /** Query language value for Norwegian (Norway). */ - NbNo = "nb-no", - /** Query language value for Armenian (Armenia). */ - HyAm = "hy-am", - /** Query language value for Bengali (India). */ - BnIn = "bn-in", - /** Query language value for Basque. */ - EuEs = "eu-es", - /** Query language value for Galician. */ - GlEs = "gl-es", - /** Query language value for Gujarati (India). */ - GuIn = "gu-in", - /** Query language value for Hebrew (Israel). */ - HeIl = "he-il", - /** Query language value for Irish (Ireland). */ - GaIe = "ga-ie", - /** Query language value for Kannada (India). */ - KnIn = "kn-in", - /** Query language value for Malayalam (India). */ - MlIn = "ml-in", - /** Query language value for Marathi (India). */ - MrIn = "mr-in", - /** Query language value for Persian (U.A.E.). */ - FaAe = "fa-ae", - /** Query language value for Punjabi (India). */ - PaIn = "pa-in", - /** Query language value for Telugu (India). */ - TeIn = "te-in", - /** Query language value for Urdu (Pakistan). */ - UrPk = "ur-pk", -} - -/** - * Defines values for QueryLanguage. \ - * {@link KnownQueryLanguage} can be used interchangeably with QueryLanguage, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **none**: Query language not specified. \ - * **en-us**: Query language value for English (United States). \ - * **en-gb**: Query language value for English (Great Britain). \ - * **en-in**: Query language value for English (India). \ - * **en-ca**: Query language value for English (Canada). \ - * **en-au**: Query language value for English (Australia). \ - * **fr-fr**: Query language value for French (France). \ - * **fr-ca**: Query language value for French (Canada). \ - * **de-de**: Query language value for German (Germany). \ - * **es-es**: Query language value for Spanish (Spain). \ - * **es-mx**: Query language value for Spanish (Mexico). \ - * **zh-cn**: Query language value for Chinese (China). \ - * **zh-tw**: Query language value for Chinese (Taiwan). \ - * **pt-br**: Query language value for Portuguese (Brazil). \ - * **pt-pt**: Query language value for Portuguese (Portugal). \ - * **it-it**: Query language value for Italian (Italy). \ - * **ja-jp**: Query language value for Japanese (Japan). \ - * **ko-kr**: Query language value for Korean (Korea). \ - * **ru-ru**: Query language value for Russian (Russia). \ - * **cs-cz**: Query language value for Czech (Czech Republic). \ - * **nl-be**: Query language value for Dutch (Belgium). 
\ - * **nl-nl**: Query language value for Dutch (Netherlands). \ - * **hu-hu**: Query language value for Hungarian (Hungary). \ - * **pl-pl**: Query language value for Polish (Poland). \ - * **sv-se**: Query language value for Swedish (Sweden). \ - * **tr-tr**: Query language value for Turkish (Turkey). \ - * **hi-in**: Query language value for Hindi (India). \ - * **ar-sa**: Query language value for Arabic (Saudi Arabia). \ - * **ar-eg**: Query language value for Arabic (Egypt). \ - * **ar-ma**: Query language value for Arabic (Morocco). \ - * **ar-kw**: Query language value for Arabic (Kuwait). \ - * **ar-jo**: Query language value for Arabic (Jordan). \ - * **da-dk**: Query language value for Danish (Denmark). \ - * **no-no**: Query language value for Norwegian (Norway). \ - * **bg-bg**: Query language value for Bulgarian (Bulgaria). \ - * **hr-hr**: Query language value for Croatian (Croatia). \ - * **hr-ba**: Query language value for Croatian (Bosnia and Herzegovina). \ - * **ms-my**: Query language value for Malay (Malaysia). \ - * **ms-bn**: Query language value for Malay (Brunei Darussalam). \ - * **sl-sl**: Query language value for Slovenian (Slovenia). \ - * **ta-in**: Query language value for Tamil (India). \ - * **vi-vn**: Query language value for Vietnamese (Viet Nam). \ - * **el-gr**: Query language value for Greek (Greece). \ - * **ro-ro**: Query language value for Romanian (Romania). \ - * **is-is**: Query language value for Icelandic (Iceland). \ - * **id-id**: Query language value for Indonesian (Indonesia). \ - * **th-th**: Query language value for Thai (Thailand). \ - * **lt-lt**: Query language value for Lithuanian (Lithuania). \ - * **uk-ua**: Query language value for Ukrainian (Ukraine). \ - * **lv-lv**: Query language value for Latvian (Latvia). \ - * **et-ee**: Query language value for Estonian (Estonia). \ - * **ca-es**: Query language value for Catalan. \ - * **fi-fi**: Query language value for Finnish (Finland). \ - * **sr-ba**: Query language value for Serbian (Bosnia and Herzegovina). \ - * **sr-me**: Query language value for Serbian (Montenegro). \ - * **sr-rs**: Query language value for Serbian (Serbia). \ - * **sk-sk**: Query language value for Slovak (Slovakia). \ - * **nb-no**: Query language value for Norwegian (Norway). \ - * **hy-am**: Query language value for Armenian (Armenia). \ - * **bn-in**: Query language value for Bengali (India). \ - * **eu-es**: Query language value for Basque. \ - * **gl-es**: Query language value for Galician. \ - * **gu-in**: Query language value for Gujarati (India). \ - * **he-il**: Query language value for Hebrew (Israel). \ - * **ga-ie**: Query language value for Irish (Ireland). \ - * **kn-in**: Query language value for Kannada (India). \ - * **ml-in**: Query language value for Malayalam (India). \ - * **mr-in**: Query language value for Marathi (India). \ - * **fa-ae**: Query language value for Persian (U.A.E.). \ - * **pa-in**: Query language value for Punjabi (India). \ - * **te-in**: Query language value for Telugu (India). \ - * **ur-pk**: Query language value for Urdu (Pakistan). - */ -export type QueryLanguage = string; - -/** Known values of {@link QuerySpellerType} that the service accepts. */ -export enum KnownQuerySpellerType { - /** Speller not enabled. */ - None = "none", - /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */ - Lexicon = "lexicon", -} - -/** - * Defines values for QuerySpellerType. 
\
- * {@link KnownQuerySpellerType} can be used interchangeably with QuerySpellerType,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **none**: Speller not enabled. \
- * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
- */
-export type QuerySpellerType = string;
-
-/** Known values of {@link VectorQueryKind} that the service accepts. */
-export enum KnownVectorQueryKind {
- /** Vector query where a raw vector value is provided. */
- Vector = "vector",
- /** Vector query where a text value that needs to be vectorized is provided. */
- Text = "text",
- /** Vector query where a URL that represents an image value that needs to be vectorized is provided. */
- ImageUrl = "imageUrl",
- /** Vector query where a base64-encoded binary of an image that needs to be vectorized is provided. */
- ImageBinary = "imageBinary",
-}
-
-/**
- * Defines values for VectorQueryKind. \
- * {@link KnownVectorQueryKind} can be used interchangeably with VectorQueryKind,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **vector**: Vector query where a raw vector value is provided. \
- * **text**: Vector query where a text value that needs to be vectorized is provided. \
- * **imageUrl**: Vector query where a URL that represents an image value that needs to be vectorized is provided. \
- * **imageBinary**: Vector query where a base64-encoded binary of an image that needs to be vectorized is provided.
- */
-export type VectorQueryKind = string;
-
-/** Known values of {@link VectorThresholdKind} that the service accepts. */
-export enum KnownVectorThresholdKind {
- /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
- VectorSimilarity = "vectorSimilarity",
- /** The results of the vector query will be filtered based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */
- SearchScore = "searchScore",
-}
-
-/**
- * Defines values for VectorThresholdKind. \
- * {@link KnownVectorThresholdKind} can be used interchangeably with VectorThresholdKind,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **vectorSimilarity**: The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. \
- * **searchScore**: The results of the vector query will be filtered based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score.
- */
-export type VectorThresholdKind = string;
-
-/** Known values of {@link VectorFilterMode} that the service accepts. */
-export enum KnownVectorFilterMode {
- /** The filter will be applied after the candidate set of vector results is returned.
Depending on the filter selectivity, this can result in fewer results than requested by the parameter 'k'. */ - PostFilter = "postFilter", - /** The filter will be applied before the search query. */ - PreFilter = "preFilter", - /** The filter will be applied after the global top-k candidate set of vector results is returned. This will result in fewer results than requested by the parameter 'k'. */ - StrictPostFilter = "strictPostFilter", -} - -/** - * Defines values for VectorFilterMode. \ - * {@link KnownVectorFilterMode} can be used interchangeably with VectorFilterMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **postFilter**: The filter will be applied after the candidate set of vector results is returned. Depending on the filter selectivity, this can result in fewer results than requested by the parameter 'k'. \ - * **preFilter**: The filter will be applied before the search query. \ - * **strictPostFilter**: The filter will be applied after the global top-k candidate set of vector results is returned. This will result in fewer results than requested by the parameter 'k'. - */ -export type VectorFilterMode = string; - -/** Known values of {@link HybridCountAndFacetMode} that the service accepts. */ -export enum KnownHybridCountAndFacetMode { - /** Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. */ - CountRetrievableResults = "countRetrievableResults", - /** Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window. */ - CountAllResults = "countAllResults", -} - -/** - * Defines values for HybridCountAndFacetMode. \ - * {@link KnownHybridCountAndFacetMode} can be used interchangeably with HybridCountAndFacetMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **countRetrievableResults**: Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. \ - * **countAllResults**: Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window. - */ -export type HybridCountAndFacetMode = string; - -/** Known values of {@link SemanticFieldState} that the service accepts. */ -export enum KnownSemanticFieldState { - /** The field was fully used for semantic enrichment. */ - Used = "used", - /** The field was not used for semantic enrichment. */ - Unused = "unused", - /** The field was partially used for semantic enrichment. */ - Partial = "partial", -} - -/** - * Defines values for SemanticFieldState. \ - * {@link KnownSemanticFieldState} can be used interchangeably with SemanticFieldState, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **used**: The field was fully used for semantic enrichment. \ - * **unused**: The field was not used for semantic enrichment. \ - * **partial**: The field was partially used for semantic enrichment. - */ -export type SemanticFieldState = string; - -/** Known values of {@link SemanticErrorReason} that the service accepts. 
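
A sketch contrasting the filter modes documented above on a hypothetical options bag; the `filter` expression and `k` are illustrative values only, not a generated SDK type:

```ts
// preFilter narrows the candidate set before the vector search, so up to k
// results can still be returned; postFilter trims after retrieval and may
// return fewer than k, per the comments above.
const preFiltered = {
  vectorFilterMode: "preFilter",
  filter: "category eq 'hotel'", // hypothetical OData filter
  k: 10,
};
const postFiltered = { ...preFiltered, vectorFilterMode: "postFilter" };

console.log(preFiltered.vectorFilterMode, postFiltered.vectorFilterMode);
```
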
*/ -export enum KnownSemanticErrorReason { - /** If `semanticMaxWaitInMilliseconds` was set and the semantic processing duration exceeded that value. Only the base results were returned. */ - MaxWaitExceeded = "maxWaitExceeded", - /** The request was throttled. Only the base results were returned. */ - CapacityOverloaded = "capacityOverloaded", - /** At least one step of the semantic process failed. */ - Transient = "transient", -} - -/** - * Defines values for SemanticErrorReason. \ - * {@link KnownSemanticErrorReason} can be used interchangeably with SemanticErrorReason, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **maxWaitExceeded**: If `semanticMaxWaitInMilliseconds` was set and the semantic processing duration exceeded that value. Only the base results were returned. \ - * **capacityOverloaded**: The request was throttled. Only the base results were returned. \ - * **transient**: At least one step of the semantic process failed. - */ -export type SemanticErrorReason = string; - -/** Known values of {@link SemanticSearchResultsType} that the service accepts. */ -export enum KnownSemanticSearchResultsType { - /** Results without any semantic enrichment or reranking. */ - BaseResults = "baseResults", - /** Results have been reranked with the reranker model and will include semantic captions. They will not include any answers, answers highlights or caption highlights. */ - RerankedResults = "rerankedResults", -} - -/** - * Defines values for SemanticSearchResultsType. \ - * {@link KnownSemanticSearchResultsType} can be used interchangeably with SemanticSearchResultsType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **baseResults**: Results without any semantic enrichment or reranking. \ - * **rerankedResults**: Results have been reranked with the reranker model and will include semantic captions. They will not include any answers, answers highlights or caption highlights. - */ -export type SemanticSearchResultsType = string; - -/** Known values of {@link SemanticQueryRewritesResultType} that the service accepts. */ -export enum KnownSemanticQueryRewritesResultType { - /** Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results. */ - OriginalQueryOnly = "originalQueryOnly", -} - -/** - * Defines values for SemanticQueryRewritesResultType. \ - * {@link KnownSemanticQueryRewritesResultType} can be used interchangeably with SemanticQueryRewritesResultType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **originalQueryOnly**: Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results. - */ -export type SemanticQueryRewritesResultType = string; -/** Defines values for QueryType. */ -export type QueryType = "simple" | "full" | "semantic"; -/** Defines values for SearchMode. */ -export type SearchMode = "any" | "all"; -/** Defines values for ScoringStatistics. */ -export type ScoringStatistics = "local" | "global"; -/** Defines values for IndexActionType. */ -export type IndexActionType = "upload" | "merge" | "mergeOrUpload" | "delete"; -/** Defines values for AutocompleteMode. */ -export type AutocompleteMode = "oneTerm" | "twoTerms" | "oneTermWithContext"; - -/** Optional parameters. 
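
Unlike the extensible `Known*` pattern, the aliases just above (`QueryType`, `SearchMode`, `IndexActionType`, ...) are closed unions, so the compiler rejects unknown values and can check exhaustiveness. A local sketch:

```ts
// Closed union copied from the alias above.
type IndexActionType = "upload" | "merge" | "mergeOrUpload" | "delete";

// Exhaustive narrowing: adding a new member forces this switch to be updated.
function isDestructive(action: IndexActionType): boolean {
  switch (action) {
    case "delete":
      return true;
    case "upload":
    case "merge":
    case "mergeOrUpload":
      return false;
  }
}

console.log(isDestructive("delete"), isDestructive("merge"));
```
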
*/
-export interface DocumentsCountOptionalParams
- extends coreClient.OperationOptions {}
-
-/** Contains response data for the count operation. */
-export type DocumentsCountResponse = {
- /** The parsed response body. */
- body: number;
-};
-
-/** Optional parameters. */
-export interface DocumentsSearchGetOptionalParams
- extends coreClient.OperationOptions {
- /** Parameter group */
- searchOptions?: SearchOptions;
- /** A full-text search query expression; use "*" or omit this parameter to match all documents. */
- searchText?: string;
- /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */
- xMsQuerySourceAuthorization?: string;
- /** A value that enables an elevated read that bypasses document-level permission checks for the query operation. */
- xMsEnableElevatedRead?: boolean;
-}
-
-/** Contains response data for the searchGet operation. */
-export type DocumentsSearchGetResponse = SearchDocumentsResult;
-
-/** Optional parameters. */
-export interface DocumentsSearchPostOptionalParams
- extends coreClient.OperationOptions {
- /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */
- xMsQuerySourceAuthorization?: string;
- /** A value that enables an elevated read that bypasses document-level permission checks for the query operation. */
- xMsEnableElevatedRead?: boolean;
-}
-
-/** Contains response data for the searchPost operation. */
-export type DocumentsSearchPostResponse = SearchDocumentsResult;
-
-/** Optional parameters. */
-export interface DocumentsGetOptionalParams
- extends coreClient.OperationOptions {
- /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */
- xMsQuerySourceAuthorization?: string;
- /** A value that enables an elevated read that bypasses document-level permission checks for the query operation. */
- xMsEnableElevatedRead?: boolean;
- /** List of field names to retrieve for the document; any field not retrieved will be missing from the returned document. */
- selectedFields?: string[];
-}
-
-/** Contains response data for the get operation. */
-export type DocumentsGetResponse = { [propertyName: string]: any };
-
-/** Optional parameters. */
-export interface DocumentsSuggestGetOptionalParams
- extends coreClient.OperationOptions {
- /** Parameter group */
- suggestOptions?: SuggestOptions;
-}
-
-/** Contains response data for the suggestGet operation. */
-export type DocumentsSuggestGetResponse = SuggestDocumentsResult;
-
-/** Optional parameters. */
-export interface DocumentsSuggestPostOptionalParams
- extends coreClient.OperationOptions {}
-
-/** Contains response data for the suggestPost operation. */
-export type DocumentsSuggestPostResponse = SuggestDocumentsResult;
-
-/** Optional parameters. */
-export interface DocumentsIndexOptionalParams
- extends coreClient.OperationOptions {}
-
-/** Contains response data for the index operation. */
-export type DocumentsIndexResponse = IndexDocumentsResult;
-
-/** Optional parameters. */
-export interface DocumentsAutocompleteGetOptionalParams
- extends coreClient.OperationOptions {
- /** Parameter group */
- autocompleteOptions?: AutocompleteOptions;
-}
-
-/** Contains response data for the autocompleteGet operation. */
-export type DocumentsAutocompleteGetResponse = AutocompleteResult;
-
-/** Optional parameters.
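
A minimal sketch of the optional-parameter bags above: plain objects layered onto `coreClient.OperationOptions`. The local type, field names, and token value are stand-ins mirroring `DocumentsGetOptionalParams`:

```ts
// Local stand-in for the DocumentsGetOptionalParams shape above.
interface GetDocOptions {
  selectedFields?: string[];
  xMsQuerySourceAuthorization?: string;
}

const getOptions: GetDocOptions = {
  selectedFields: ["hotelId", "hotelName"],    // hypothetical field names
  xMsQuerySourceAuthorization: "<user-token>", // placeholder ACL token
};

console.log(getOptions.selectedFields?.join(","));
```
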
*/ -export interface DocumentsAutocompletePostOptionalParams - extends coreClient.OperationOptions {} - -/** Contains response data for the autocompletePost operation. */ -export type DocumentsAutocompletePostResponse = AutocompleteResult; - -/** Optional parameters. */ -export interface SearchClientOptionalParams - extends coreHttpCompat.ExtendedServiceClientOptions { - /** Overrides client endpoint. */ - endpoint?: string; -} diff --git a/sdk/search/search-documents/src/generated/data/models/mappers.ts b/sdk/search/search-documents/src/generated/data/models/mappers.ts deleted file mode 100644 index 756037c57dd8..000000000000 --- a/sdk/search/search-documents/src/generated/data/models/mappers.ts +++ /dev/null @@ -1,1589 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import * as coreClient from "@azure/core-client"; - -export const ErrorResponse: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ErrorResponse", - modelProperties: { - error: { - serializedName: "error", - type: { - name: "Composite", - className: "ErrorDetail", - }, - }, - }, - }, -}; - -export const ErrorDetail: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ErrorDetail", - modelProperties: { - code: { - serializedName: "code", - readOnly: true, - type: { - name: "String", - }, - }, - message: { - serializedName: "message", - readOnly: true, - type: { - name: "String", - }, - }, - target: { - serializedName: "target", - readOnly: true, - type: { - name: "String", - }, - }, - details: { - serializedName: "details", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "ErrorDetail", - }, - }, - }, - }, - additionalInfo: { - serializedName: "additionalInfo", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "ErrorAdditionalInfo", - }, - }, - }, - }, - }, - }, -}; - -export const ErrorAdditionalInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ErrorAdditionalInfo", - modelProperties: { - type: { - serializedName: "type", - readOnly: true, - type: { - name: "String", - }, - }, - info: { - serializedName: "info", - readOnly: true, - type: { - name: "Dictionary", - value: { type: { name: "any" } }, - }, - }, - }, - }, -}; - -export const SearchDocumentsResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchDocumentsResult", - modelProperties: { - count: { - serializedName: "@odata\\.count", - readOnly: true, - type: { - name: "Number", - }, - }, - coverage: { - serializedName: "@search\\.coverage", - readOnly: true, - type: { - name: "Number", - }, - }, - facets: { - serializedName: "@search\\.facets", - readOnly: true, - type: { - name: "Dictionary", - value: { - type: { - name: "Sequence", - element: { - type: { name: "Composite", className: "FacetResult" }, - }, - }, - }, - }, - }, - answers: { - serializedName: "@search\\.answers", - readOnly: true, - nullable: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "QueryAnswerResult", - }, - }, - }, - }, - nextPageParameters: { - serializedName: "@search\\.nextPageParameters", - type: { - name: "Composite", - className: "SearchRequest", - }, - }, - results: { - serializedName: "value", - required: true, - readOnly: 
true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchResult", - }, - }, - }, - }, - nextLink: { - serializedName: "@odata\\.nextLink", - readOnly: true, - type: { - name: "String", - }, - }, - semanticPartialResponseReason: { - serializedName: "@search\\.semanticPartialResponseReason", - readOnly: true, - type: { - name: "String", - }, - }, - semanticPartialResponseType: { - serializedName: "@search\\.semanticPartialResponseType", - readOnly: true, - type: { - name: "String", - }, - }, - semanticQueryRewritesResultType: { - serializedName: "@search\\.semanticQueryRewritesResultType", - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const FacetResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "FacetResult", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - count: { - serializedName: "count", - readOnly: true, - type: { - name: "Number", - }, - }, - avg: { - serializedName: "avg", - readOnly: true, - type: { - name: "Number", - }, - }, - min: { - serializedName: "min", - readOnly: true, - type: { - name: "Number", - }, - }, - max: { - serializedName: "max", - readOnly: true, - type: { - name: "Number", - }, - }, - sum: { - serializedName: "sum", - readOnly: true, - type: { - name: "Number", - }, - }, - cardinality: { - serializedName: "cardinality", - readOnly: true, - type: { - name: "Number", - }, - }, - facets: { - serializedName: "@search\\.facets", - readOnly: true, - type: { - name: "Dictionary", - value: { - type: { - name: "Sequence", - element: { - type: { name: "Composite", className: "FacetResult" }, - }, - }, - }, - }, - }, - }, - }, -}; - -export const QueryAnswerResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryAnswerResult", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - score: { - serializedName: "score", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - key: { - serializedName: "key", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - text: { - serializedName: "text", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - highlights: { - serializedName: "highlights", - readOnly: true, - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchRequest: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchRequest", - modelProperties: { - includeTotalResultCount: { - serializedName: "count", - type: { - name: "Boolean", - }, - }, - facets: { - serializedName: "facets", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - filter: { - serializedName: "filter", - type: { - name: "String", - }, - }, - highlightFields: { - serializedName: "highlight", - type: { - name: "String", - }, - }, - highlightPostTag: { - serializedName: "highlightPostTag", - type: { - name: "String", - }, - }, - highlightPreTag: { - serializedName: "highlightPreTag", - type: { - name: "String", - }, - }, - minimumCoverage: { - serializedName: "minimumCoverage", - type: { - name: "Number", - }, - }, - orderBy: { - serializedName: "orderby", - type: { - name: "String", - }, - }, - queryType: { - serializedName: "queryType", - type: { - name: "Enum", - allowedValues: ["simple", "full", "semantic"], - }, - }, - scoringStatistics: { - serializedName: "scoringStatistics", - type: { - name: "Enum", - allowedValues: ["local", "global"], - }, 
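// Illustrative sketch (not part of the generated file): core-client treats an
// unescaped "." in a serializedName as a property-path separator, which is why
// the OData keys in these mappers are written with escaped dots
// ("@odata\\.count", "@search\\.coverage"). Assuming @azure/core-client's
// public createSerializer API, the mappers in this file are consumed roughly
// like this:
//
//   import { createSerializer } from "@azure/core-client";
//   import * as Mappers from "./mappers.js";
//
//   const serializer = createSerializer(Mappers, /* isXml */ false);
//   const wire = { "@odata.count": 42, value: [{ "@search.score": 1.5 }] };
//   const page = serializer.deserialize(
//     Mappers.SearchDocumentsResult,
//     wire,
//     "SearchDocumentsResult",
//   );
//   // page.count === 42; page.results[0]._score === 1.5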
- }, - sessionId: { - serializedName: "sessionId", - type: { - name: "String", - }, - }, - scoringParameters: { - serializedName: "scoringParameters", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - scoringProfile: { - serializedName: "scoringProfile", - type: { - name: "String", - }, - }, - debug: { - serializedName: "debug", - type: { - name: "String", - }, - }, - searchText: { - serializedName: "search", - type: { - name: "String", - }, - }, - searchFields: { - serializedName: "searchFields", - type: { - name: "String", - }, - }, - searchMode: { - serializedName: "searchMode", - type: { - name: "Enum", - allowedValues: ["any", "all"], - }, - }, - queryLanguage: { - serializedName: "queryLanguage", - type: { - name: "String", - }, - }, - speller: { - serializedName: "speller", - type: { - name: "String", - }, - }, - select: { - serializedName: "select", - type: { - name: "String", - }, - }, - skip: { - serializedName: "skip", - type: { - name: "Number", - }, - }, - top: { - serializedName: "top", - type: { - name: "Number", - }, - }, - semanticConfigurationName: { - serializedName: "semanticConfiguration", - type: { - name: "String", - }, - }, - semanticErrorHandling: { - serializedName: "semanticErrorHandling", - type: { - name: "String", - }, - }, - semanticMaxWaitInMilliseconds: { - constraints: { - InclusiveMinimum: 700, - }, - serializedName: "semanticMaxWaitInMilliseconds", - nullable: true, - type: { - name: "Number", - }, - }, - semanticQuery: { - serializedName: "semanticQuery", - type: { - name: "String", - }, - }, - answers: { - serializedName: "answers", - type: { - name: "String", - }, - }, - captions: { - serializedName: "captions", - type: { - name: "String", - }, - }, - queryRewrites: { - serializedName: "queryRewrites", - type: { - name: "String", - }, - }, - semanticFields: { - serializedName: "semanticFields", - type: { - name: "String", - }, - }, - vectorQueries: { - serializedName: "vectorQueries", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "VectorQuery", - }, - }, - }, - }, - vectorFilterMode: { - serializedName: "vectorFilterMode", - type: { - name: "String", - }, - }, - hybridSearch: { - serializedName: "hybridSearch", - type: { - name: "Composite", - className: "HybridSearch", - }, - }, - }, - }, -}; - -export const VectorQuery: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "VectorQuery", - uberParent: "VectorQuery", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - kNearestNeighborsCount: { - serializedName: "k", - type: { - name: "Number", - }, - }, - fields: { - serializedName: "fields", - type: { - name: "String", - }, - }, - exhaustive: { - serializedName: "exhaustive", - type: { - name: "Boolean", - }, - }, - oversampling: { - serializedName: "oversampling", - type: { - name: "Number", - }, - }, - weight: { - serializedName: "weight", - type: { - name: "Number", - }, - }, - threshold: { - serializedName: "threshold", - type: { - name: "Composite", - className: "VectorThreshold", - }, - }, - filterOverride: { - serializedName: "filterOverride", - type: { - name: "String", - }, - }, - perDocumentVectorLimit: { - serializedName: "perDocumentVectorLimit", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const VectorThreshold: coreClient.CompositeMapper = { - type: { - name: "Composite", 
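// Illustrative sketch (not part of the generated file): VectorQuery is
// polymorphic, and the serializer picks the concrete mapper by matching the
// "kind" discriminator against the discriminators map exported at the bottom
// of this file ("VectorQuery.vector" -> VectorizedQuery, "VectorQuery.text"
// -> VectorizableTextQuery, and so on). For example:
//
//   const body = serializer.serialize(
//     Mappers.SearchRequest,
//     {
//       searchText: "*",
//       vectorQueries: [
//         { kind: "vector", vector: [0.1, 0.2], kNearestNeighborsCount: 3, fields: "embedding" },
//       ],
//     },
//     "searchRequest",
//   );
//
// "kind: vector" routes the element through the VectorizedQuery mapper, so
// kNearestNeighborsCount reaches the wire under its serializedName "k".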
- className: "VectorThreshold", - uberParent: "VectorThreshold", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const HybridSearch: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "HybridSearch", - modelProperties: { - maxTextRecallSize: { - serializedName: "maxTextRecallSize", - type: { - name: "Number", - }, - }, - countAndFacetMode: { - serializedName: "countAndFacetMode", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchResult", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - _score: { - serializedName: "@search\\.score", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - _rerankerScore: { - serializedName: "@search\\.rerankerScore", - readOnly: true, - nullable: true, - type: { - name: "Number", - }, - }, - _rerankerBoostedScore: { - serializedName: "@search\\.rerankerBoostedScore", - readOnly: true, - nullable: true, - type: { - name: "Number", - }, - }, - _highlights: { - serializedName: "@search\\.highlights", - readOnly: true, - type: { - name: "Dictionary", - value: { - type: { name: "Sequence", element: { type: { name: "String" } } }, - }, - }, - }, - _captions: { - serializedName: "@search\\.captions", - readOnly: true, - nullable: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "QueryCaptionResult", - }, - }, - }, - }, - _documentDebugInfo: { - serializedName: "@search\\.documentDebugInfo", - type: { - name: "Composite", - className: "DocumentDebugInfo", - }, - }, - }, - }, -}; - -export const QueryCaptionResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryCaptionResult", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - text: { - serializedName: "text", - readOnly: true, - type: { - name: "String", - }, - }, - highlights: { - serializedName: "highlights", - readOnly: true, - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const DocumentDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DocumentDebugInfo", - modelProperties: { - semantic: { - serializedName: "semantic", - type: { - name: "Composite", - className: "SemanticDebugInfo", - }, - }, - vectors: { - serializedName: "vectors", - type: { - name: "Composite", - className: "VectorsDebugInfo", - }, - }, - innerHits: { - serializedName: "innerHits", - readOnly: true, - type: { - name: "Dictionary", - value: { - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "QueryResultDocumentInnerHit", - }, - }, - }, - }, - }, - }, - }, - }, -}; - -export const SemanticDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SemanticDebugInfo", - modelProperties: { - titleField: { - serializedName: "titleField", - type: { - name: "Composite", - className: "QueryResultDocumentSemanticField", - }, - }, - contentFields: { - serializedName: "contentFields", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "QueryResultDocumentSemanticField", - }, - }, - }, - }, - keywordFields: { - serializedName: "keywordFields", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - 
name: "Composite", - className: "QueryResultDocumentSemanticField", - }, - }, - }, - }, - rerankerInput: { - serializedName: "rerankerInput", - type: { - name: "Composite", - className: "QueryResultDocumentRerankerInput", - }, - }, - }, - }, -}; - -export const QueryResultDocumentSemanticField: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryResultDocumentSemanticField", - modelProperties: { - name: { - serializedName: "name", - readOnly: true, - type: { - name: "String", - }, - }, - state: { - serializedName: "state", - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const QueryResultDocumentRerankerInput: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryResultDocumentRerankerInput", - modelProperties: { - title: { - serializedName: "title", - readOnly: true, - type: { - name: "String", - }, - }, - content: { - serializedName: "content", - readOnly: true, - type: { - name: "String", - }, - }, - keywords: { - serializedName: "keywords", - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorsDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "VectorsDebugInfo", - modelProperties: { - subscores: { - serializedName: "subscores", - type: { - name: "Composite", - className: "QueryResultDocumentSubscores", - }, - }, - }, - }, -}; - -export const QueryResultDocumentSubscores: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryResultDocumentSubscores", - modelProperties: { - text: { - serializedName: "text", - type: { - name: "Composite", - className: "TextResult", - }, - }, - vectors: { - serializedName: "vectors", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Dictionary", - value: { - type: { - name: "Composite", - className: "SingleVectorFieldResult", - }, - }, - }, - }, - }, - }, - documentBoost: { - serializedName: "documentBoost", - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const TextResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "TextResult", - modelProperties: { - searchScore: { - serializedName: "searchScore", - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const SingleVectorFieldResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SingleVectorFieldResult", - modelProperties: { - searchScore: { - serializedName: "searchScore", - readOnly: true, - type: { - name: "Number", - }, - }, - vectorSimilarity: { - serializedName: "vectorSimilarity", - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const QueryResultDocumentInnerHit: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryResultDocumentInnerHit", - modelProperties: { - ordinal: { - serializedName: "ordinal", - readOnly: true, - type: { - name: "Number", - }, - }, - vectors: { - serializedName: "vectors", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Dictionary", - value: { - type: { - name: "Composite", - className: "SingleVectorFieldResult", - }, - }, - }, - }, - }, - }, - }, - }, -}; - -export const SuggestDocumentsResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SuggestDocumentsResult", - modelProperties: { - results: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: 
"Composite", - className: "SuggestResult", - }, - }, - }, - }, - coverage: { - serializedName: "@search\\.coverage", - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const SuggestResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SuggestResult", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - _text: { - serializedName: "@search\\.text", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SuggestRequest: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SuggestRequest", - modelProperties: { - filter: { - serializedName: "filter", - type: { - name: "String", - }, - }, - useFuzzyMatching: { - serializedName: "fuzzy", - type: { - name: "Boolean", - }, - }, - highlightPostTag: { - serializedName: "highlightPostTag", - type: { - name: "String", - }, - }, - highlightPreTag: { - serializedName: "highlightPreTag", - type: { - name: "String", - }, - }, - minimumCoverage: { - serializedName: "minimumCoverage", - type: { - name: "Number", - }, - }, - orderBy: { - serializedName: "orderby", - type: { - name: "String", - }, - }, - searchText: { - serializedName: "search", - required: true, - type: { - name: "String", - }, - }, - searchFields: { - serializedName: "searchFields", - type: { - name: "String", - }, - }, - select: { - serializedName: "select", - type: { - name: "String", - }, - }, - suggesterName: { - serializedName: "suggesterName", - required: true, - type: { - name: "String", - }, - }, - top: { - serializedName: "top", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const IndexBatch: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexBatch", - modelProperties: { - actions: { - serializedName: "value", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "IndexAction", - }, - }, - }, - }, - }, - }, -}; - -export const IndexAction: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexAction", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - __actionType: { - serializedName: "@search\\.action", - required: true, - type: { - name: "Enum", - allowedValues: ["upload", "merge", "mergeOrUpload", "delete"], - }, - }, - }, - }, -}; - -export const IndexDocumentsResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexDocumentsResult", - modelProperties: { - results: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "IndexingResult", - }, - }, - }, - }, - }, - }, -}; - -export const IndexingResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexingResult", - modelProperties: { - key: { - serializedName: "key", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - errorMessage: { - serializedName: "errorMessage", - readOnly: true, - type: { - name: "String", - }, - }, - succeeded: { - serializedName: "status", - required: true, - readOnly: true, - type: { - name: "Boolean", - }, - }, - statusCode: { - serializedName: "statusCode", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const AutocompleteResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AutocompleteResult", - modelProperties: { - coverage: { - 
serializedName: "@search\\.coverage", - readOnly: true, - type: { - name: "Number", - }, - }, - results: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "AutocompleteItem", - }, - }, - }, - }, - }, - }, -}; - -export const AutocompleteItem: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AutocompleteItem", - modelProperties: { - text: { - serializedName: "text", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - queryPlusText: { - serializedName: "queryPlusText", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const AutocompleteRequest: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AutocompleteRequest", - modelProperties: { - searchText: { - serializedName: "search", - required: true, - type: { - name: "String", - }, - }, - autocompleteMode: { - serializedName: "autocompleteMode", - type: { - name: "Enum", - allowedValues: ["oneTerm", "twoTerms", "oneTermWithContext"], - }, - }, - filter: { - serializedName: "filter", - type: { - name: "String", - }, - }, - useFuzzyMatching: { - serializedName: "fuzzy", - type: { - name: "Boolean", - }, - }, - highlightPostTag: { - serializedName: "highlightPostTag", - type: { - name: "String", - }, - }, - highlightPreTag: { - serializedName: "highlightPreTag", - type: { - name: "String", - }, - }, - minimumCoverage: { - serializedName: "minimumCoverage", - type: { - name: "Number", - }, - }, - searchFields: { - serializedName: "searchFields", - type: { - name: "String", - }, - }, - suggesterName: { - serializedName: "suggesterName", - required: true, - type: { - name: "String", - }, - }, - top: { - serializedName: "top", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const DebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DebugInfo", - modelProperties: { - queryRewrites: { - serializedName: "queryRewrites", - type: { - name: "Composite", - className: "QueryRewritesDebugInfo", - }, - }, - }, - }, -}; - -export const QueryRewritesDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryRewritesDebugInfo", - modelProperties: { - text: { - serializedName: "text", - type: { - name: "Composite", - className: "QueryRewritesValuesDebugInfo", - }, - }, - vectors: { - serializedName: "vectors", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "QueryRewritesValuesDebugInfo", - }, - }, - }, - }, - }, - }, -}; - -export const QueryRewritesValuesDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryRewritesValuesDebugInfo", - modelProperties: { - inputQuery: { - serializedName: "inputQuery", - readOnly: true, - type: { - name: "String", - }, - }, - rewrites: { - serializedName: "rewrites", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const VectorizedQuery: coreClient.CompositeMapper = { - serializedName: "vector", - type: { - name: "Composite", - className: "VectorizedQuery", - uberParent: "VectorQuery", - polymorphicDiscriminator: VectorQuery.type.polymorphicDiscriminator, - modelProperties: { - ...VectorQuery.type.modelProperties, - vector: { - serializedName: "vector", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Number", - }, - }, - 
}, - }, - }, - }, -}; - -export const VectorizableTextQuery: coreClient.CompositeMapper = { - serializedName: "text", - type: { - name: "Composite", - className: "VectorizableTextQuery", - uberParent: "VectorQuery", - polymorphicDiscriminator: VectorQuery.type.polymorphicDiscriminator, - modelProperties: { - ...VectorQuery.type.modelProperties, - text: { - serializedName: "text", - required: true, - type: { - name: "String", - }, - }, - queryRewrites: { - serializedName: "queryRewrites", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorizableImageUrlQuery: coreClient.CompositeMapper = { - serializedName: "imageUrl", - type: { - name: "Composite", - className: "VectorizableImageUrlQuery", - uberParent: "VectorQuery", - polymorphicDiscriminator: VectorQuery.type.polymorphicDiscriminator, - modelProperties: { - ...VectorQuery.type.modelProperties, - url: { - serializedName: "url", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorizableImageBinaryQuery: coreClient.CompositeMapper = { - serializedName: "imageBinary", - type: { - name: "Composite", - className: "VectorizableImageBinaryQuery", - uberParent: "VectorQuery", - polymorphicDiscriminator: VectorQuery.type.polymorphicDiscriminator, - modelProperties: { - ...VectorQuery.type.modelProperties, - base64Image: { - serializedName: "base64Image", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorSimilarityThreshold: coreClient.CompositeMapper = { - serializedName: "vectorSimilarity", - type: { - name: "Composite", - className: "VectorSimilarityThreshold", - uberParent: "VectorThreshold", - polymorphicDiscriminator: VectorThreshold.type.polymorphicDiscriminator, - modelProperties: { - ...VectorThreshold.type.modelProperties, - value: { - serializedName: "value", - required: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const SearchScoreThreshold: coreClient.CompositeMapper = { - serializedName: "searchScore", - type: { - name: "Composite", - className: "SearchScoreThreshold", - uberParent: "VectorThreshold", - polymorphicDiscriminator: VectorThreshold.type.polymorphicDiscriminator, - modelProperties: { - ...VectorThreshold.type.modelProperties, - value: { - serializedName: "value", - required: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export let discriminators = { - VectorQuery: VectorQuery, - VectorThreshold: VectorThreshold, - "VectorQuery.vector": VectorizedQuery, - "VectorQuery.text": VectorizableTextQuery, - "VectorQuery.imageUrl": VectorizableImageUrlQuery, - "VectorQuery.imageBinary": VectorizableImageBinaryQuery, - "VectorThreshold.vectorSimilarity": VectorSimilarityThreshold, - "VectorThreshold.searchScore": SearchScoreThreshold, -}; diff --git a/sdk/search/search-documents/src/generated/data/models/parameters.ts b/sdk/search/search-documents/src/generated/data/models/parameters.ts deleted file mode 100644 index e5037ce302ec..000000000000 --- a/sdk/search/search-documents/src/generated/data/models/parameters.ts +++ /dev/null @@ -1,709 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- */ - -import { - OperationParameter, - OperationURLParameter, - OperationQueryParameter, -} from "@azure/core-client"; -import { - SearchRequest as SearchRequestMapper, - SuggestRequest as SuggestRequestMapper, - IndexBatch as IndexBatchMapper, - AutocompleteRequest as AutocompleteRequestMapper, -} from "../models/mappers.js"; - -export const accept: OperationParameter = { - parameterPath: "accept", - mapper: { - defaultValue: "application/json", - isConstant: true, - serializedName: "Accept", - type: { - name: "String", - }, - }, -}; - -export const endpoint: OperationURLParameter = { - parameterPath: "endpoint", - mapper: { - serializedName: "endpoint", - required: true, - type: { - name: "String", - }, - }, - skipEncoding: true, -}; - -export const indexName: OperationURLParameter = { - parameterPath: "indexName", - mapper: { - serializedName: "indexName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const apiVersion: OperationQueryParameter = { - parameterPath: "apiVersion", - mapper: { - serializedName: "api-version", - required: true, - type: { - name: "String", - }, - }, -}; - -export const searchText: OperationQueryParameter = { - parameterPath: ["options", "searchText"], - mapper: { - serializedName: "search", - type: { - name: "String", - }, - }, -}; - -export const includeTotalResultCount: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "includeTotalResultCount"], - mapper: { - serializedName: "$count", - type: { - name: "Boolean", - }, - }, -}; - -export const facets: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "facets"], - mapper: { - serializedName: "facet", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "Multi", -}; - -export const filter: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "filter"], - mapper: { - serializedName: "$filter", - type: { - name: "String", - }, - }, -}; - -export const highlightFields: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "highlightFields"], - mapper: { - serializedName: "highlight", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const highlightPostTag: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "highlightPostTag"], - mapper: { - serializedName: "highlightPostTag", - type: { - name: "String", - }, - }, -}; - -export const highlightPreTag: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "highlightPreTag"], - mapper: { - serializedName: "highlightPreTag", - type: { - name: "String", - }, - }, -}; - -export const minimumCoverage: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "minimumCoverage"], - mapper: { - serializedName: "minimumCoverage", - type: { - name: "Number", - }, - }, -}; - -export const orderBy: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "orderBy"], - mapper: { - serializedName: "$orderby", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const queryType: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "queryType"], - mapper: { - serializedName: "queryType", - type: { - name: "Enum", - allowedValues: ["simple", "full", "semantic"], - }, - }, -}; - -export const scoringParameters: OperationQueryParameter = { - parameterPath: 
["options", "searchOptions", "scoringParameters"], - mapper: { - serializedName: "scoringParameter", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "Multi", -}; - -export const scoringProfile: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "scoringProfile"], - mapper: { - serializedName: "scoringProfile", - type: { - name: "String", - }, - }, -}; - -export const searchFields: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "searchFields"], - mapper: { - serializedName: "searchFields", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const searchMode: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "searchMode"], - mapper: { - serializedName: "searchMode", - type: { - name: "Enum", - allowedValues: ["any", "all"], - }, - }, -}; - -export const scoringStatistics: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "scoringStatistics"], - mapper: { - serializedName: "scoringStatistics", - type: { - name: "Enum", - allowedValues: ["local", "global"], - }, - }, -}; - -export const sessionId: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "sessionId"], - mapper: { - serializedName: "sessionId", - type: { - name: "String", - }, - }, -}; - -export const select: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "select"], - mapper: { - serializedName: "$select", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const skip: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "skip"], - mapper: { - serializedName: "$skip", - type: { - name: "Number", - }, - }, -}; - -export const top: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "top"], - mapper: { - serializedName: "$top", - type: { - name: "Number", - }, - }, -}; - -export const semanticConfiguration: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "semanticConfiguration"], - mapper: { - serializedName: "semanticConfiguration", - type: { - name: "String", - }, - }, -}; - -export const semanticErrorHandling: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "semanticErrorHandling"], - mapper: { - serializedName: "semanticErrorHandling", - type: { - name: "String", - }, - }, -}; - -export const semanticMaxWaitInMilliseconds: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "semanticMaxWaitInMilliseconds"], - mapper: { - constraints: { - InclusiveMinimum: 700, - }, - serializedName: "semanticMaxWaitInMilliseconds", - type: { - name: "Number", - }, - }, -}; - -export const answers: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "answers"], - mapper: { - serializedName: "answers", - type: { - name: "String", - }, - }, -}; - -export const captions: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "captions"], - mapper: { - serializedName: "captions", - type: { - name: "String", - }, - }, -}; - -export const semanticQuery: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "semanticQuery"], - mapper: { - serializedName: "semanticQuery", - type: { - name: "String", - }, - }, -}; - -export const queryRewrites: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", 
"queryRewrites"], - mapper: { - serializedName: "queryRewrites", - type: { - name: "String", - }, - }, -}; - -export const debug: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "debug"], - mapper: { - serializedName: "debug", - type: { - name: "String", - }, - }, -}; - -export const queryLanguage: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "queryLanguage"], - mapper: { - serializedName: "queryLanguage", - type: { - name: "String", - }, - }, -}; - -export const speller: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "speller"], - mapper: { - serializedName: "speller", - type: { - name: "String", - }, - }, -}; - -export const semanticFields: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "semanticFields"], - mapper: { - serializedName: "semanticFields", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const xMsQuerySourceAuthorization: OperationParameter = { - parameterPath: ["options", "xMsQuerySourceAuthorization"], - mapper: { - serializedName: "x-ms-query-source-authorization", - type: { - name: "String", - }, - }, -}; - -export const xMsEnableElevatedRead: OperationParameter = { - parameterPath: ["options", "xMsEnableElevatedRead"], - mapper: { - serializedName: "x-ms-enable-elevated-read", - type: { - name: "Boolean", - }, - }, -}; - -export const contentType: OperationParameter = { - parameterPath: ["options", "contentType"], - mapper: { - defaultValue: "application/json", - isConstant: true, - serializedName: "Content-Type", - type: { - name: "String", - }, - }, -}; - -export const searchRequest: OperationParameter = { - parameterPath: "searchRequest", - mapper: SearchRequestMapper, -}; - -export const key: OperationURLParameter = { - parameterPath: "key", - mapper: { - serializedName: "key", - required: true, - type: { - name: "String", - }, - }, -}; - -export const selectedFields: OperationQueryParameter = { - parameterPath: ["options", "selectedFields"], - mapper: { - serializedName: "$select", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const searchText1: OperationQueryParameter = { - parameterPath: "searchText", - mapper: { - serializedName: "search", - required: true, - type: { - name: "String", - }, - }, -}; - -export const suggesterName: OperationQueryParameter = { - parameterPath: "suggesterName", - mapper: { - serializedName: "suggesterName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const filter1: OperationQueryParameter = { - parameterPath: ["options", "suggestOptions", "filter"], - mapper: { - serializedName: "$filter", - type: { - name: "String", - }, - }, -}; - -export const useFuzzyMatching: OperationQueryParameter = { - parameterPath: ["options", "suggestOptions", "useFuzzyMatching"], - mapper: { - serializedName: "fuzzy", - type: { - name: "Boolean", - }, - }, -}; - -export const highlightPostTag1: OperationQueryParameter = { - parameterPath: ["options", "suggestOptions", "highlightPostTag"], - mapper: { - serializedName: "highlightPostTag", - type: { - name: "String", - }, - }, -}; - -export const highlightPreTag1: OperationQueryParameter = { - parameterPath: ["options", "suggestOptions", "highlightPreTag"], - mapper: { - serializedName: "highlightPreTag", - type: { - name: "String", - }, - }, -}; - -export const minimumCoverage1: OperationQueryParameter = { - 
parameterPath: ["options", "suggestOptions", "minimumCoverage"], - mapper: { - serializedName: "minimumCoverage", - type: { - name: "Number", - }, - }, -}; - -export const orderBy1: OperationQueryParameter = { - parameterPath: ["options", "suggestOptions", "orderBy"], - mapper: { - serializedName: "$orderby", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const searchFields1: OperationQueryParameter = { - parameterPath: ["options", "suggestOptions", "searchFields"], - mapper: { - serializedName: "searchFields", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const select1: OperationQueryParameter = { - parameterPath: ["options", "suggestOptions", "select"], - mapper: { - serializedName: "$select", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const top1: OperationQueryParameter = { - parameterPath: ["options", "suggestOptions", "top"], - mapper: { - serializedName: "$top", - type: { - name: "Number", - }, - }, -}; - -export const suggestRequest: OperationParameter = { - parameterPath: "suggestRequest", - mapper: SuggestRequestMapper, -}; - -export const batch: OperationParameter = { - parameterPath: "batch", - mapper: IndexBatchMapper, -}; - -export const autocompleteMode: OperationQueryParameter = { - parameterPath: ["options", "autocompleteOptions", "autocompleteMode"], - mapper: { - serializedName: "autocompleteMode", - type: { - name: "Enum", - allowedValues: ["oneTerm", "twoTerms", "oneTermWithContext"], - }, - }, -}; - -export const filter2: OperationQueryParameter = { - parameterPath: ["options", "autocompleteOptions", "filter"], - mapper: { - serializedName: "$filter", - type: { - name: "String", - }, - }, -}; - -export const useFuzzyMatching1: OperationQueryParameter = { - parameterPath: ["options", "autocompleteOptions", "useFuzzyMatching"], - mapper: { - serializedName: "fuzzy", - type: { - name: "Boolean", - }, - }, -}; - -export const highlightPostTag2: OperationQueryParameter = { - parameterPath: ["options", "autocompleteOptions", "highlightPostTag"], - mapper: { - serializedName: "highlightPostTag", - type: { - name: "String", - }, - }, -}; - -export const highlightPreTag2: OperationQueryParameter = { - parameterPath: ["options", "autocompleteOptions", "highlightPreTag"], - mapper: { - serializedName: "highlightPreTag", - type: { - name: "String", - }, - }, -}; - -export const minimumCoverage2: OperationQueryParameter = { - parameterPath: ["options", "autocompleteOptions", "minimumCoverage"], - mapper: { - serializedName: "minimumCoverage", - type: { - name: "Number", - }, - }, -}; - -export const searchFields2: OperationQueryParameter = { - parameterPath: ["options", "autocompleteOptions", "searchFields"], - mapper: { - serializedName: "searchFields", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - -export const top2: OperationQueryParameter = { - parameterPath: ["options", "autocompleteOptions", "top"], - mapper: { - serializedName: "$top", - type: { - name: "Number", - }, - }, -}; - -export const autocompleteRequest: OperationParameter = { - parameterPath: "autocompleteRequest", - mapper: AutocompleteRequestMapper, -}; diff --git a/sdk/search/search-documents/src/generated/data/operations/documents.ts 
b/sdk/search/search-documents/src/generated/data/operations/documents.ts deleted file mode 100644 index 602d25bf0bc5..000000000000 --- a/sdk/search/search-documents/src/generated/data/operations/documents.ts +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import { Documents } from "../operationsInterfaces/index.js"; -import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchClient } from "../searchClient.js"; -import { - DocumentsCountOptionalParams, - DocumentsCountResponse, - DocumentsSearchGetOptionalParams, - DocumentsSearchGetResponse, - SearchRequest, - DocumentsSearchPostOptionalParams, - DocumentsSearchPostResponse, - DocumentsGetOptionalParams, - DocumentsGetResponse, - DocumentsSuggestGetOptionalParams, - DocumentsSuggestGetResponse, - SuggestRequest, - DocumentsSuggestPostOptionalParams, - DocumentsSuggestPostResponse, - IndexBatch, - DocumentsIndexOptionalParams, - DocumentsIndexResponse, - DocumentsAutocompleteGetOptionalParams, - DocumentsAutocompleteGetResponse, - AutocompleteRequest, - DocumentsAutocompletePostOptionalParams, - DocumentsAutocompletePostResponse, -} from "../models/index.js"; - -/** Class containing Documents operations. */ -export class DocumentsImpl implements Documents { - private readonly client: SearchClient; - - /** - * Initialize a new instance of the class Documents class. - * @param client Reference to the service client - */ - constructor(client: SearchClient) { - this.client = client; - } - - /** - * Queries the number of documents in the index. - * @param options The options parameters. - */ - count( - options?: DocumentsCountOptionalParams, - ): Promise { - return this.client.sendOperationRequest({ options }, countOperationSpec); - } - - /** - * Searches for documents in the index. - * @param options The options parameters. - */ - searchGet( - options?: DocumentsSearchGetOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { options }, - searchGetOperationSpec, - ); - } - - /** - * Searches for documents in the index. - * @param searchRequest The definition of the Search request. - * @param options The options parameters. - */ - searchPost( - searchRequest: SearchRequest, - options?: DocumentsSearchPostOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { searchRequest, options }, - searchPostOperationSpec, - ); - } - - /** - * Retrieves a document from the index. - * @param key The key of the document to retrieve. - * @param options The options parameters. - */ - get( - key: string, - options?: DocumentsGetOptionalParams, - ): Promise { - return this.client.sendOperationRequest({ key, options }, getOperationSpec); - } - - /** - * Suggests documents in the index that match the given partial query text. - * @param searchText The search text to use to suggest documents. Must be at least 1 character, and no - * more than 100 characters. - * @param suggesterName The name of the suggester as specified in the suggesters collection that's part - * of the index definition. - * @param options The options parameters. 
- */ - suggestGet( - searchText: string, - suggesterName: string, - options?: DocumentsSuggestGetOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { searchText, suggesterName, options }, - suggestGetOperationSpec, - ); - } - - /** - * Suggests documents in the index that match the given partial query text. - * @param suggestRequest The Suggest request. - * @param options The options parameters. - */ - suggestPost( - suggestRequest: SuggestRequest, - options?: DocumentsSuggestPostOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { suggestRequest, options }, - suggestPostOperationSpec, - ); - } - - /** - * Sends a batch of document write actions to the index. - * @param batch The batch of index actions. - * @param options The options parameters. - */ - index( - batch: IndexBatch, - options?: DocumentsIndexOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { batch, options }, - indexOperationSpec, - ); - } - - /** - * Autocompletes incomplete query terms based on input text and matching terms in the index. - * @param searchText The incomplete term which should be auto-completed. - * @param suggesterName The name of the suggester as specified in the suggesters collection that's part - * of the index definition. - * @param options The options parameters. - */ - autocompleteGet( - searchText: string, - suggesterName: string, - options?: DocumentsAutocompleteGetOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { searchText, suggesterName, options }, - autocompleteGetOperationSpec, - ); - } - - /** - * Autocompletes incomplete query terms based on input text and matching terms in the index. - * @param autocompleteRequest The definition of the Autocomplete request. - * @param options The options parameters. 
- */ - autocompletePost( - autocompleteRequest: AutocompleteRequest, - options?: DocumentsAutocompletePostOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { autocompleteRequest, options }, - autocompletePostOperationSpec, - ); - } -} -// Operation Specifications -const serializer = coreClient.createSerializer(Mappers, /* isXml */ false); - -const countOperationSpec: coreClient.OperationSpec = { - path: "/docs/$count", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: { type: { name: "Number" } }, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.indexName], - headerParameters: [Parameters.accept], - serializer, -}; -const searchGetOperationSpec: coreClient.OperationSpec = { - path: "/docs", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: Mappers.SearchDocumentsResult, - }, - 206: { - bodyMapper: Mappers.SearchDocumentsResult, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [ - Parameters.apiVersion, - Parameters.searchText, - Parameters.includeTotalResultCount, - Parameters.facets, - Parameters.filter, - Parameters.highlightFields, - Parameters.highlightPostTag, - Parameters.highlightPreTag, - Parameters.minimumCoverage, - Parameters.orderBy, - Parameters.queryType, - Parameters.scoringParameters, - Parameters.scoringProfile, - Parameters.searchFields, - Parameters.searchMode, - Parameters.scoringStatistics, - Parameters.sessionId, - Parameters.select, - Parameters.skip, - Parameters.top, - Parameters.semanticConfiguration, - Parameters.semanticErrorHandling, - Parameters.semanticMaxWaitInMilliseconds, - Parameters.answers, - Parameters.captions, - Parameters.semanticQuery, - Parameters.queryRewrites, - Parameters.debug, - Parameters.queryLanguage, - Parameters.speller, - Parameters.semanticFields, - ], - urlParameters: [Parameters.endpoint, Parameters.indexName], - headerParameters: [ - Parameters.accept, - Parameters.xMsQuerySourceAuthorization, - Parameters.xMsEnableElevatedRead, - ], - serializer, -}; -const searchPostOperationSpec: coreClient.OperationSpec = { - path: "/docs/search.post.search", - httpMethod: "POST", - responses: { - 200: { - bodyMapper: Mappers.SearchDocumentsResult, - }, - 206: { - bodyMapper: Mappers.SearchDocumentsResult, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.searchRequest, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.indexName], - headerParameters: [ - Parameters.accept, - Parameters.xMsQuerySourceAuthorization, - Parameters.xMsEnableElevatedRead, - Parameters.contentType, - ], - mediaType: "json", - serializer, -}; -const getOperationSpec: coreClient.OperationSpec = { - path: "/docs('{key}')", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: { - type: { name: "Dictionary", value: { type: { name: "any" } } }, - }, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion, Parameters.selectedFields], - urlParameters: [Parameters.endpoint, Parameters.indexName, Parameters.key], - headerParameters: [ - Parameters.accept, - Parameters.xMsQuerySourceAuthorization, - Parameters.xMsEnableElevatedRead, - ], - serializer, -}; -const suggestGetOperationSpec: coreClient.OperationSpec = { - path: "/docs/search.suggest", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: Mappers.SuggestDocumentsResult, - }, - default: { - 
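// Illustrative usage sketch (the endpoint, index name, and api-version
// string are assumptions, not part of the generated file): each documents.*
// method above pairs its arguments with the matching OperationSpec in this
// section and dispatches them through sendOperationRequest, which serializes
// the request and deserializes the response with the mappers each spec
// references:
//
//   const client = new SearchClient(
//     "https://example.search.windows.net",
//     "hotels",
//     "2025-11-01-preview",
//   );
//   const total = await client.documents.count();
//   const found = await client.documents.searchPost({ searchText: "wifi" });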
bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [ - Parameters.apiVersion, - Parameters.searchText1, - Parameters.suggesterName, - Parameters.filter1, - Parameters.useFuzzyMatching, - Parameters.highlightPostTag1, - Parameters.highlightPreTag1, - Parameters.minimumCoverage1, - Parameters.orderBy1, - Parameters.searchFields1, - Parameters.select1, - Parameters.top1, - ], - urlParameters: [Parameters.endpoint, Parameters.indexName], - headerParameters: [Parameters.accept], - serializer, -}; -const suggestPostOperationSpec: coreClient.OperationSpec = { - path: "/docs/search.post.suggest", - httpMethod: "POST", - responses: { - 200: { - bodyMapper: Mappers.SuggestDocumentsResult, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.suggestRequest, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.indexName], - headerParameters: [Parameters.accept, Parameters.contentType], - mediaType: "json", - serializer, -}; -const indexOperationSpec: coreClient.OperationSpec = { - path: "/docs/search.index", - httpMethod: "POST", - responses: { - 200: { - bodyMapper: Mappers.IndexDocumentsResult, - }, - 207: { - bodyMapper: Mappers.IndexDocumentsResult, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.batch, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.indexName], - headerParameters: [Parameters.accept, Parameters.contentType], - mediaType: "json", - serializer, -}; -const autocompleteGetOperationSpec: coreClient.OperationSpec = { - path: "/docs/search.autocomplete", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: Mappers.AutocompleteResult, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [ - Parameters.apiVersion, - Parameters.searchText1, - Parameters.suggesterName, - Parameters.autocompleteMode, - Parameters.filter2, - Parameters.useFuzzyMatching1, - Parameters.highlightPostTag2, - Parameters.highlightPreTag2, - Parameters.minimumCoverage2, - Parameters.searchFields2, - Parameters.top2, - ], - urlParameters: [Parameters.endpoint, Parameters.indexName], - headerParameters: [Parameters.accept], - serializer, -}; -const autocompletePostOperationSpec: coreClient.OperationSpec = { - path: "/docs/search.post.autocomplete", - httpMethod: "POST", - responses: { - 200: { - bodyMapper: Mappers.AutocompleteResult, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.autocompleteRequest, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.indexName], - headerParameters: [Parameters.accept, Parameters.contentType], - mediaType: "json", - serializer, -}; diff --git a/sdk/search/search-documents/src/generated/data/operations/index.ts b/sdk/search/search-documents/src/generated/data/operations/index.ts deleted file mode 100644 index e6fde9effe60..000000000000 --- a/sdk/search/search-documents/src/generated/data/operations/index.ts +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- */ - -export * from "./documents.js"; diff --git a/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts b/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts deleted file mode 100644 index cf365fcb51c8..000000000000 --- a/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import { - DocumentsCountOptionalParams, - DocumentsCountResponse, - DocumentsSearchGetOptionalParams, - DocumentsSearchGetResponse, - SearchRequest, - DocumentsSearchPostOptionalParams, - DocumentsSearchPostResponse, - DocumentsGetOptionalParams, - DocumentsGetResponse, - DocumentsSuggestGetOptionalParams, - DocumentsSuggestGetResponse, - SuggestRequest, - DocumentsSuggestPostOptionalParams, - DocumentsSuggestPostResponse, - IndexBatch, - DocumentsIndexOptionalParams, - DocumentsIndexResponse, - DocumentsAutocompleteGetOptionalParams, - DocumentsAutocompleteGetResponse, - AutocompleteRequest, - DocumentsAutocompletePostOptionalParams, - DocumentsAutocompletePostResponse, -} from "../models/index.js"; - -/** Interface representing a Documents. */ -export interface Documents { - /** - * Queries the number of documents in the index. - * @param options The options parameters. - */ - count( - options?: DocumentsCountOptionalParams, - ): Promise; - /** - * Searches for documents in the index. - * @param options The options parameters. - */ - searchGet( - options?: DocumentsSearchGetOptionalParams, - ): Promise; - /** - * Searches for documents in the index. - * @param searchRequest The definition of the Search request. - * @param options The options parameters. - */ - searchPost( - searchRequest: SearchRequest, - options?: DocumentsSearchPostOptionalParams, - ): Promise; - /** - * Retrieves a document from the index. - * @param key The key of the document to retrieve. - * @param options The options parameters. - */ - get( - key: string, - options?: DocumentsGetOptionalParams, - ): Promise; - /** - * Suggests documents in the index that match the given partial query text. - * @param searchText The search text to use to suggest documents. Must be at least 1 character, and no - * more than 100 characters. - * @param suggesterName The name of the suggester as specified in the suggesters collection that's part - * of the index definition. - * @param options The options parameters. - */ - suggestGet( - searchText: string, - suggesterName: string, - options?: DocumentsSuggestGetOptionalParams, - ): Promise; - /** - * Suggests documents in the index that match the given partial query text. - * @param suggestRequest The Suggest request. - * @param options The options parameters. - */ - suggestPost( - suggestRequest: SuggestRequest, - options?: DocumentsSuggestPostOptionalParams, - ): Promise; - /** - * Sends a batch of document write actions to the index. - * @param batch The batch of index actions. - * @param options The options parameters. - */ - index( - batch: IndexBatch, - options?: DocumentsIndexOptionalParams, - ): Promise; - /** - * Autocompletes incomplete query terms based on input text and matching terms in the index. - * @param searchText The incomplete term which should be auto-completed. 
- * @param suggesterName The name of the suggester as specified in the suggesters collection that's part - * of the index definition. - * @param options The options parameters. - */ - autocompleteGet( - searchText: string, - suggesterName: string, - options?: DocumentsAutocompleteGetOptionalParams, - ): Promise; - /** - * Autocompletes incomplete query terms based on input text and matching terms in the index. - * @param autocompleteRequest The definition of the Autocomplete request. - * @param options The options parameters. - */ - autocompletePost( - autocompleteRequest: AutocompleteRequest, - options?: DocumentsAutocompletePostOptionalParams, - ): Promise; -} diff --git a/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts b/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts deleted file mode 100644 index e6fde9effe60..000000000000 --- a/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -export * from "./documents.js"; diff --git a/sdk/search/search-documents/src/generated/data/searchClient.ts b/sdk/search/search-documents/src/generated/data/searchClient.ts deleted file mode 100644 index b05ae879280c..000000000000 --- a/sdk/search/search-documents/src/generated/data/searchClient.ts +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import * as coreHttpCompat from "@azure/core-http-compat"; -import { - PipelineRequest, - PipelineResponse, - SendRequest, -} from "@azure/core-rest-pipeline"; -import { DocumentsImpl } from "./operations/index.js"; -import { Documents } from "./operationsInterfaces/index.js"; -import { - ApiVersion20251101Preview, - SearchClientOptionalParams, -} from "./models/index.js"; - -/** @internal */ -export class SearchClient extends coreHttpCompat.ExtendedServiceClient { - endpoint: string; - indexName: string; - apiVersion: ApiVersion20251101Preview; - - /** - * Initializes a new instance of the SearchClient class. - * @param endpoint The endpoint URL of the search service. - * @param indexName The name of the index. - * @param apiVersion Api Version - * @param options The parameter options - */ - constructor( - endpoint: string, - indexName: string, - apiVersion: ApiVersion20251101Preview, - options?: SearchClientOptionalParams, - ) { - if (endpoint === undefined) { - throw new Error("'endpoint' cannot be null"); - } - if (indexName === undefined) { - throw new Error("'indexName' cannot be null"); - } - if (apiVersion === undefined) { - throw new Error("'apiVersion' cannot be null"); - } - - // Initializing default values for options - if (!options) { - options = {}; - } - const defaults: SearchClientOptionalParams = { - requestContentType: "application/json; charset=utf-8", - }; - - const packageDetails = `azsdk-js-search-documents/12.3.0-beta.1`; - const userAgentPrefix = - options.userAgentOptions && options.userAgentOptions.userAgentPrefix - ? 
`${options.userAgentOptions.userAgentPrefix} ${packageDetails}` - : `${packageDetails}`; - - const optionsWithDefaults = { - ...defaults, - ...options, - userAgentOptions: { - userAgentPrefix, - }, - endpoint: - options.endpoint ?? - options.baseUri ?? - "{endpoint}/indexes('{indexName}')", - }; - super(optionsWithDefaults); - // Parameter assignments - this.endpoint = endpoint; - this.indexName = indexName; - this.apiVersion = apiVersion; - this.documents = new DocumentsImpl(this); - this.addCustomApiVersionPolicy(apiVersion); - } - - /** A function that adds a policy that sets the api-version (or equivalent) to reflect the library version. */ - private addCustomApiVersionPolicy(apiVersion?: string) { - if (!apiVersion) { - return; - } - const apiVersionPolicy = { - name: "CustomApiVersionPolicy", - async sendRequest( - request: PipelineRequest, - next: SendRequest, - ): Promise { - const param = request.url.split("?"); - if (param.length > 1) { - const newParams = param[1].split("&").map((item) => { - if (item.indexOf("api-version") > -1) { - return "api-version=" + apiVersion; - } else { - return item; - } - }); - request.url = param[0] + "?" + newParams.join("&"); - } - return next(request); - }, - }; - this.pipeline.addPolicy(apiVersionPolicy); - } - - documents: Documents; -} diff --git a/sdk/search/search-documents/src/generated/knowledgeBase/index.ts b/sdk/search/search-documents/src/generated/knowledgeBase/index.ts deleted file mode 100644 index 2bee12aaf341..000000000000 --- a/sdk/search/search-documents/src/generated/knowledgeBase/index.ts +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -export * from "./models/index.js"; -export { SearchClient } from "./searchClient.js"; -export * from "./operationsInterfaces/index.js"; diff --git a/sdk/search/search-documents/src/generated/knowledgeBase/models/index.ts b/sdk/search/search-documents/src/generated/knowledgeBase/models/index.ts deleted file mode 100644 index 2df8a98068d8..000000000000 --- a/sdk/search/search-documents/src/generated/knowledgeBase/models/index.ts +++ /dev/null @@ -1,733 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- */ - -import * as coreClient from "@azure/core-client"; -import * as coreHttpCompat from "@azure/core-http-compat"; - -export type KnowledgeBaseMessageContentUnion = - | KnowledgeBaseMessageContent - | KnowledgeBaseMessageTextContent - | KnowledgeBaseMessageImageContent; -export type KnowledgeRetrievalIntentUnion = - | KnowledgeRetrievalIntent - | KnowledgeRetrievalSemanticIntent; -export type KnowledgeRetrievalReasoningEffortUnion = - | KnowledgeRetrievalReasoningEffort - | KnowledgeRetrievalMinimalReasoningEffort - | KnowledgeRetrievalLowReasoningEffort - | KnowledgeRetrievalMediumReasoningEffort; -export type KnowledgeSourceParamsUnion = - | KnowledgeSourceParams - | SearchIndexKnowledgeSourceParams - | AzureBlobKnowledgeSourceParams - | IndexedSharePointKnowledgeSourceParams - | IndexedOneLakeKnowledgeSourceParams - | WebKnowledgeSourceParams - | RemoteSharePointKnowledgeSourceParams; -export type KnowledgeBaseActivityRecordUnion = - | KnowledgeBaseActivityRecord - | KnowledgeBaseRetrievalActivityRecordUnion - | KnowledgeBaseModelQueryPlanningActivityRecord - | KnowledgeBaseModelAnswerSynthesisActivityRecord - | KnowledgeBaseAgenticReasoningActivityRecord; -export type KnowledgeBaseReferenceUnion = - | KnowledgeBaseReference - | KnowledgeBaseSearchIndexReference - | KnowledgeBaseAzureBlobReference - | KnowledgeBaseIndexedSharePointReference - | KnowledgeBaseIndexedOneLakeReference - | KnowledgeBaseWebReference - | KnowledgeBaseRemoteSharePointReference; -export type KnowledgeBaseRetrievalActivityRecordUnion = - | KnowledgeBaseRetrievalActivityRecord - | KnowledgeBaseSearchIndexActivityRecord - | KnowledgeBaseAzureBlobActivityRecord - | KnowledgeBaseIndexedSharePointActivityRecord - | KnowledgeBaseIndexedOneLakeActivityRecord - | KnowledgeBaseWebActivityRecord - | KnowledgeBaseRemoteSharePointActivityRecord; - -/** The input contract for the retrieval request. */ -export interface KnowledgeBaseRetrievalRequest { - /** A list of chat message style input. */ - messages?: KnowledgeBaseMessage[]; - /** A list of intended queries to execute without model query planning. */ - intents?: KnowledgeRetrievalIntentUnion[]; - /** The maximum runtime in seconds. */ - maxRuntimeInSeconds?: number; - /** Limits the maximum size of the content in the output. */ - maxOutputSize?: number; - retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion; - /** Indicates retrieval results should include activity information. */ - includeActivity?: boolean; - /** The output configuration for this retrieval. */ - outputMode?: KnowledgeRetrievalOutputMode; - /** A list of runtime parameters for the knowledge sources. */ - knowledgeSourceParams?: KnowledgeSourceParamsUnion[]; -} - -/** The natural language message style object. */ -export interface KnowledgeBaseMessage { - /** The role of the tool response. */ - role?: string; - content: KnowledgeBaseMessageContentUnion[]; -} - -/** Specifies the type of the message content. */ -export interface KnowledgeBaseMessageContent { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "text" | "image"; -} - -/** An intended query to execute without model query planning. 
*/ -export interface KnowledgeRetrievalIntent { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "semantic"; -} - -export interface KnowledgeRetrievalReasoningEffort { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "minimal" | "low" | "medium"; -} - -export interface KnowledgeSourceParams { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: - | "searchIndex" - | "azureBlob" - | "indexedSharePoint" - | "indexedOneLake" - | "web" - | "remoteSharePoint"; - /** The name of the index the params apply to. */ - knowledgeSourceName: string; - /** Indicates whether references should be included for data retrieved from this source. */ - includeReferences?: boolean; - /** Indicates whether references should include the structured data obtained during retrieval in their payload. */ - includeReferenceSourceData?: boolean; - /** Indicates that this knowledge source should bypass source selection and always be queried at retrieval time. */ - alwaysQuerySource?: boolean; - /** The reranker threshold all retrieved documents must meet to be included in the response. */ - rerankerThreshold?: number; -} - -/** The output contract for the retrieval response. */ -export interface KnowledgeBaseRetrievalResponse { - response?: KnowledgeBaseMessage[]; - /** The activity records for tracking progress and billing implications. */ - activity?: KnowledgeBaseActivityRecordUnion[]; - /** The references for the retrieval data used in the response. */ - references?: KnowledgeBaseReferenceUnion[]; -} - -/** Base type for activity records. */ -export interface KnowledgeBaseActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: - | "KnowledgeBaseRetrievalActivityRecord" - | "searchIndex" - | "azureBlob" - | "indexedSharePoint" - | "indexedOneLake" - | "web" - | "remoteSharePoint" - | "modelQueryPlanning" - | "modelAnswerSynthesis" - | "agenticReasoning"; - /** The ID of the activity record. */ - id: number; - /** The elapsed time in milliseconds for the retrieval activity. */ - elapsedMs?: number; - /** The error detail explaining why the operation failed. This property is only included when the activity does not succeed. */ - error?: KnowledgeBaseErrorDetail; -} - -/** The error details. */ -export interface KnowledgeBaseErrorDetail { - /** - * The error code. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly code?: string; - /** - * The error message. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly message?: string; - /** - * The error target. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly target?: string; - /** - * The error details. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly details?: KnowledgeBaseErrorDetail[]; - /** - * The error additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly additionalInfo?: KnowledgeBaseErrorAdditionalInfo[]; -} - -/** The resource management error additional info. */ -export interface KnowledgeBaseErrorAdditionalInfo { - /** - * The additional info type. - * NOTE: This property will not be serialized. It can only be populated by the server. 
- */ - readonly type?: string; - /** - * The additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly info?: Record; -} - -/** Base type for references. */ -export interface KnowledgeBaseReference { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: - | "searchIndex" - | "azureBlob" - | "indexedSharePoint" - | "indexedOneLake" - | "web" - | "remoteSharePoint"; - /** The ID of the reference. */ - id: string; - /** The source activity ID for the reference. */ - activitySource: number; - /** Dictionary of */ - sourceData?: { [propertyName: string]: any }; - /** The reranker score for the document reference. */ - rerankerScore?: number; -} - -/** Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). */ -export interface ErrorResponse { - /** The error object. */ - error?: ErrorDetail; -} - -/** The error detail. */ -export interface ErrorDetail { - /** - * The error code. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly code?: string; - /** - * The error message. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly message?: string; - /** - * The error target. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly target?: string; - /** - * The error details. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly details?: ErrorDetail[]; - /** - * The error additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly additionalInfo?: ErrorAdditionalInfo[]; -} - -/** The resource management error additional info. */ -export interface ErrorAdditionalInfo { - /** - * The additional info type. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly type?: string; - /** - * The additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly info?: Record; -} - -export interface KnowledgeBaseMessageImageContentImage { - /** The url of the image. */ - url: string; -} - -/** Represents the arguments the search index retrieval activity was run with. */ -export interface KnowledgeBaseSearchIndexActivityArguments { - /** The search string used to query the search index. */ - search?: string; - /** The filter string. */ - filter?: string; - /** What fields were selected for search. */ - sourceDataFields?: SearchIndexFieldReference[]; - /** What fields were searched against. */ - searchFields?: SearchIndexFieldReference[]; - /** What semantic configuration was used from the search index. */ - semanticConfigurationName?: string; -} - -export interface SearchIndexFieldReference { - name: string; -} - -/** Represents the arguments the azure blob retrieval activity was run with. */ -export interface KnowledgeBaseAzureBlobActivityArguments { - /** The search string used to query blob contents. */ - search?: string; -} - -/** Represents the arguments the indexed SharePoint retrieval activity was run with. */ -export interface KnowledgeBaseIndexedSharePointActivityArguments { - /** The search string used to query indexed SharePoint contents. 
*/ - search?: string; -} - -/** Represents the arguments the indexed OneLake retrieval activity was run with. */ -export interface KnowledgeBaseIndexedOneLakeActivityArguments { - /** The search string used to query indexed OneLake contents. */ - search?: string; -} - -/** Represents the arguments the web retrieval activity was run with. */ -export interface KnowledgeBaseWebActivityArguments { - /** The search string used to query the web. */ - search?: string; - /** The language for the retrieval activity. */ - language?: string; - /** The market for the retrieval activity. */ - market?: string; - /** The number of web results returned. */ - count?: number; - /** The freshness for the retrieval activity. */ - freshness?: string; -} - -/** Represents the arguments the remote SharePoint retrieval activity was run with. */ -export interface KnowledgeBaseRemoteSharePointActivityArguments { - /** The search string used to query the remote SharePoint knowledge source. */ - search?: string; - /** The filter expression add-on for the retrieval activity. */ - filterExpressionAddOn?: string; -} - -/** Information about the sensitivity label applied to a SharePoint document. */ -export interface SharePointSensitivityLabelInfo { - /** The display name for the sensitivity label. */ - displayName?: string; - /** The ID of the sensitivity label. */ - sensitivityLabelId?: string; - /** The tooltip that should be displayed for the label in a UI. */ - tooltip?: string; - /** The priority in which the sensitivity label is applied. */ - priority?: number; - /** The color that the UI should display for the label, if configured. */ - color?: string; - /** Indicates whether the sensitivity label enforces encryption. */ - isEncrypted?: boolean; -} - -/** Text message type. */ -export interface KnowledgeBaseMessageTextContent - extends KnowledgeBaseMessageContent { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "text"; - text: string; -} - -/** Text message type. */ -export interface KnowledgeBaseMessageImageContent - extends KnowledgeBaseMessageContent { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "image"; - image: KnowledgeBaseMessageImageContentImage; -} - -export interface KnowledgeRetrievalSemanticIntent - extends KnowledgeRetrievalIntent { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "semantic"; - /** The semantic query to execute */ - search: string; -} - -/** Run knowledge retrieval with minimal reasoning effort. */ -export interface KnowledgeRetrievalMinimalReasoningEffort - extends KnowledgeRetrievalReasoningEffort { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "minimal"; -} - -/** Run knowledge retrieval with low reasoning effort. */ -export interface KnowledgeRetrievalLowReasoningEffort - extends KnowledgeRetrievalReasoningEffort { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "low"; -} - -/** Run knowledge retrieval with medium reasoning effort. 
*/ -export interface KnowledgeRetrievalMediumReasoningEffort - extends KnowledgeRetrievalReasoningEffort { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "medium"; -} - -/** Specifies runtime parameters for a search index knowledge source */ -export interface SearchIndexKnowledgeSourceParams - extends KnowledgeSourceParams { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "searchIndex"; - /** A filter condition applied to the index (e.g., 'State eq VA'). */ - filterAddOn?: string; -} - -/** Specifies runtime parameters for a azure blob knowledge source */ -export interface AzureBlobKnowledgeSourceParams extends KnowledgeSourceParams { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "azureBlob"; -} - -/** Specifies runtime parameters for a indexed SharePoint knowledge source */ -export interface IndexedSharePointKnowledgeSourceParams - extends KnowledgeSourceParams { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "indexedSharePoint"; -} - -/** Specifies runtime parameters for a indexed OneLake knowledge source */ -export interface IndexedOneLakeKnowledgeSourceParams - extends KnowledgeSourceParams { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "indexedOneLake"; -} - -/** Specifies runtime parameters for a web knowledge source */ -export interface WebKnowledgeSourceParams extends KnowledgeSourceParams { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "web"; - /** The language of the web results. */ - language?: string; - /** The market of the web results. */ - market?: string; - /** The number of web results to return. */ - count?: number; - /** The freshness of web results. */ - freshness?: string; -} - -/** Specifies runtime parameters for a remote SharePoint knowledge source */ -export interface RemoteSharePointKnowledgeSourceParams - extends KnowledgeSourceParams { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "remoteSharePoint"; - /** A filter condition applied to the SharePoint data source. It must be specified in the Keyword Query Language syntax. It will be combined as a conjunction with the filter expression specified in the knowledge source definition. */ - filterExpressionAddOn?: string; -} - -/** Represents a retrieval activity record. */ -export interface KnowledgeBaseRetrievalActivityRecord - extends KnowledgeBaseActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: - | "KnowledgeBaseRetrievalActivityRecord" - | "searchIndex" - | "azureBlob" - | "indexedSharePoint" - | "indexedOneLake" - | "web" - | "remoteSharePoint"; - /** The knowledge source for the retrieval activity. */ - knowledgeSourceName?: string; - /** The query time for this retrieval activity. */ - queryTime?: Date; - /** The count of documents retrieved that were sufficiently relevant to pass the reranker threshold. */ - count?: number; -} - -/** Represents an LLM query planning activity record. */ -export interface KnowledgeBaseModelQueryPlanningActivityRecord - extends KnowledgeBaseActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "modelQueryPlanning"; - /** The number of input tokens for the LLM query planning activity. 
*/ - inputTokens?: number; - /** The number of output tokens for the LLM query planning activity. */ - outputTokens?: number; -} - -/** Represents an LLM answer synthesis activity record. */ -export interface KnowledgeBaseModelAnswerSynthesisActivityRecord - extends KnowledgeBaseActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "modelAnswerSynthesis"; - /** The number of input tokens for the LLM answer synthesis activity. */ - inputTokens?: number; - /** The number of output tokens for the LLM answer synthesis activity. */ - outputTokens?: number; -} - -/** Represents an agentic reasoning activity record. */ -export interface KnowledgeBaseAgenticReasoningActivityRecord - extends KnowledgeBaseActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "agenticReasoning"; - /** The number of input tokens for agentic reasoning. */ - reasoningTokens?: number; - retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion; -} - -/** Represents an Azure Search document reference. */ -export interface KnowledgeBaseSearchIndexReference - extends KnowledgeBaseReference { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "searchIndex"; - /** The document key for the reference. */ - docKey?: string; -} - -/** Represents an Azure Blob Storage document reference. */ -export interface KnowledgeBaseAzureBlobReference - extends KnowledgeBaseReference { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "azureBlob"; - /** The blob URL for the reference. */ - blobUrl?: string; -} - -/** Represents an Azure Blob Storage document reference. */ -export interface KnowledgeBaseIndexedSharePointReference - extends KnowledgeBaseReference { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "indexedSharePoint"; - /** The document URL for the reference. */ - docUrl?: string; -} - -/** Represents an Azure Blob Storage document reference. */ -export interface KnowledgeBaseIndexedOneLakeReference - extends KnowledgeBaseReference { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "indexedOneLake"; - /** The document URL for the reference. */ - docUrl?: string; -} - -/** Represents a web document reference. */ -export interface KnowledgeBaseWebReference extends KnowledgeBaseReference { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "web"; - /** The url the reference data originated from. */ - url?: string; - /** The title of the web document. */ - title?: string; -} - -/** Represents a remote SharePoint document reference. */ -export interface KnowledgeBaseRemoteSharePointReference - extends KnowledgeBaseReference { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "remoteSharePoint"; - /** The url the reference data originated from. */ - webUrl?: string; - /** Information about the sensitivity label applied to a SharePoint document. */ - searchSensitivityLabelInfo?: SharePointSensitivityLabelInfo; -} - -/** Represents a search index retrieval activity record. 
*/ -export interface KnowledgeBaseSearchIndexActivityRecord - extends KnowledgeBaseRetrievalActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "searchIndex"; - /** The search index arguments for the retrieval activity. */ - searchIndexArguments?: KnowledgeBaseSearchIndexActivityArguments; -} - -/** Represents a azure blob retrieval activity record. */ -export interface KnowledgeBaseAzureBlobActivityRecord - extends KnowledgeBaseRetrievalActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "azureBlob"; - /** The azure blob arguments for the retrieval activity. */ - azureBlobArguments?: KnowledgeBaseAzureBlobActivityArguments; -} - -/** Represents a indexed SharePoint retrieval activity record. */ -export interface KnowledgeBaseIndexedSharePointActivityRecord - extends KnowledgeBaseRetrievalActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "indexedSharePoint"; - /** The indexed SharePoint arguments for the retrieval activity. */ - indexedSharePointArguments?: KnowledgeBaseIndexedSharePointActivityArguments; -} - -/** Represents a indexed OneLake retrieval activity record. */ -export interface KnowledgeBaseIndexedOneLakeActivityRecord - extends KnowledgeBaseRetrievalActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "indexedOneLake"; - /** The indexed OneLake arguments for the retrieval activity. */ - indexedOneLakeArguments?: KnowledgeBaseIndexedOneLakeActivityArguments; -} - -/** Represents a web retrieval activity record. */ -export interface KnowledgeBaseWebActivityRecord - extends KnowledgeBaseRetrievalActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "web"; - /** The web arguments for the retrieval activity. */ - webArguments?: KnowledgeBaseWebActivityArguments; -} - -/** Represents a remote SharePoint retrieval activity record. */ -export interface KnowledgeBaseRemoteSharePointActivityRecord - extends KnowledgeBaseRetrievalActivityRecord { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "remoteSharePoint"; - /** The remote SharePoint arguments for the retrieval activity. */ - remoteSharePointArguments?: KnowledgeBaseRemoteSharePointActivityArguments; -} - -/** Known values of {@link ApiVersion20251101Preview} that the service accepts. */ -export enum KnownApiVersion20251101Preview { - /** Api Version '2025-11-01-preview' */ - TwoThousandTwentyFive1101Preview = "2025-11-01-preview", -} - -/** - * Defines values for ApiVersion20251101Preview. \ - * {@link KnownApiVersion20251101Preview} can be used interchangeably with ApiVersion20251101Preview, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **2025-11-01-preview**: Api Version '2025-11-01-preview' - */ -export type ApiVersion20251101Preview = string; - -/** Known values of {@link KnowledgeBaseMessageContentType} that the service accepts. */ -export enum KnownKnowledgeBaseMessageContentType { - /** Text message content kind. */ - Text = "text", - /** Image message content kind. */ - Image = "image", -} - -/** - * Defines values for KnowledgeBaseMessageContentType. 
\ - * {@link KnownKnowledgeBaseMessageContentType} can be used interchangeably with KnowledgeBaseMessageContentType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **text**: Text message content kind. \ - * **image**: Image message content kind. - */ -export type KnowledgeBaseMessageContentType = string; - -/** Known values of {@link KnowledgeRetrievalIntentType} that the service accepts. */ -export enum KnownKnowledgeRetrievalIntentType { - /** A natural language semantic query intent. */ - Semantic = "semantic", -} - -/** - * Defines values for KnowledgeRetrievalIntentType. \ - * {@link KnownKnowledgeRetrievalIntentType} can be used interchangeably with KnowledgeRetrievalIntentType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **semantic**: A natural language semantic query intent. - */ -export type KnowledgeRetrievalIntentType = string; - -/** Known values of {@link KnowledgeRetrievalReasoningEffortKind} that the service accepts. */ -export enum KnownKnowledgeRetrievalReasoningEffortKind { - /** Does not perform any source selections, query planning, or iterative search. */ - Minimal = "minimal", - /** Use low reasoning during retrieval. */ - Low = "low", - /** Use a moderate amount of reasoning during retrieval. */ - Medium = "medium", -} - -/** - * Defines values for KnowledgeRetrievalReasoningEffortKind. \ - * {@link KnownKnowledgeRetrievalReasoningEffortKind} can be used interchangeably with KnowledgeRetrievalReasoningEffortKind, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **minimal**: Does not perform any source selections, query planning, or iterative search. \ - * **low**: Use low reasoning during retrieval. \ - * **medium**: Use a moderate amount of reasoning during retrieval. - */ -export type KnowledgeRetrievalReasoningEffortKind = string; - -/** Known values of {@link KnowledgeRetrievalOutputMode} that the service accepts. */ -export enum KnownKnowledgeRetrievalOutputMode { - /** Return data from the knowledge sources directly without generative alteration. */ - ExtractiveData = "extractiveData", - /** Synthesize an answer for the response payload. */ - AnswerSynthesis = "answerSynthesis", -} - -/** - * Defines values for KnowledgeRetrievalOutputMode. \ - * {@link KnownKnowledgeRetrievalOutputMode} can be used interchangeably with KnowledgeRetrievalOutputMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **extractiveData**: Return data from the knowledge sources directly without generative alteration. \ - * **answerSynthesis**: Synthesize an answer for the response payload. - */ -export type KnowledgeRetrievalOutputMode = string; - -/** Known values of {@link KnowledgeSourceKind} that the service accepts. */ -export enum KnownKnowledgeSourceKind { - /** A knowledge source that retrieves data from a Search Index. */ - SearchIndex = "searchIndex", - /** A knowledge source that retrieves and ingests data from Azure Blob Storage to a Search Index. */ - AzureBlob = "azureBlob", - /** A knowledge source that retrieves data from the web. */ - Web = "web", - /** A knowledge source that retrieves data from a remote SharePoint endpoint. */ - RemoteSharePoint = "remoteSharePoint", - /** A knowledge source that retrieves and ingests data from SharePoint to a Search Index. 
*/ - IndexedSharePoint = "indexedSharePoint", - /** A knowledge source that retrieves and ingests data from OneLake to a Search Index. */ - IndexedOneLake = "indexedOneLake", -} - -/** - * Defines values for KnowledgeSourceKind. \ - * {@link KnownKnowledgeSourceKind} can be used interchangeably with KnowledgeSourceKind, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **searchIndex**: A knowledge source that retrieves data from a Search Index. \ - * **azureBlob**: A knowledge source that retrieves and ingests data from Azure Blob Storage to a Search Index. \ - * **web**: A knowledge source that retrieves data from the web. \ - * **remoteSharePoint**: A knowledge source that retrieves data from a remote SharePoint endpoint. \ - * **indexedSharePoint**: A knowledge source that retrieves and ingests data from SharePoint to a Search Index. \ - * **indexedOneLake**: A knowledge source that retrieves and ingests data from OneLake to a Search Index. - */ -export type KnowledgeSourceKind = string; - -/** Optional parameters. */ -export interface KnowledgeRetrievalRetrieveOptionalParams - extends coreClient.OperationOptions { - /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ - xMsQuerySourceAuthorization?: string; -} - -/** Contains response data for the retrieve operation. */ -export type KnowledgeRetrievalRetrieveResponse = KnowledgeBaseRetrievalResponse; - -/** Optional parameters. */ -export interface SearchClientOptionalParams - extends coreHttpCompat.ExtendedServiceClientOptions { - /** Overrides client endpoint. */ - endpoint?: string; -} diff --git a/sdk/search/search-documents/src/generated/knowledgeBase/models/mappers.ts b/sdk/search/search-documents/src/generated/knowledgeBase/models/mappers.ts deleted file mode 100644 index a518ec761054..000000000000 --- a/sdk/search/search-documents/src/generated/knowledgeBase/models/mappers.ts +++ /dev/null @@ -1,1443 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- */ - -import * as coreClient from "@azure/core-client"; - -export const KnowledgeBaseRetrievalRequest: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeBaseRetrievalRequest", - modelProperties: { - messages: { - serializedName: "messages", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeBaseMessage", - }, - }, - }, - }, - intents: { - serializedName: "intents", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeRetrievalIntent", - }, - }, - }, - }, - maxRuntimeInSeconds: { - serializedName: "maxRuntimeInSeconds", - type: { - name: "Number", - }, - }, - maxOutputSize: { - serializedName: "maxOutputSize", - type: { - name: "Number", - }, - }, - retrievalReasoningEffort: { - serializedName: "retrievalReasoningEffort", - type: { - name: "Composite", - className: "KnowledgeRetrievalReasoningEffort", - }, - }, - includeActivity: { - serializedName: "includeActivity", - type: { - name: "Boolean", - }, - }, - outputMode: { - serializedName: "outputMode", - type: { - name: "String", - }, - }, - knowledgeSourceParams: { - serializedName: "knowledgeSourceParams", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeSourceParams", - }, - }, - }, - }, - }, - }, -}; - -export const KnowledgeBaseMessage: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeBaseMessage", - modelProperties: { - role: { - serializedName: "role", - type: { - name: "String", - }, - }, - content: { - serializedName: "content", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeBaseMessageContent", - }, - }, - }, - }, - }, - }, -}; - -export const KnowledgeBaseMessageContent: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeBaseMessageContent", - uberParent: "KnowledgeBaseMessageContent", - polymorphicDiscriminator: { - serializedName: "type", - clientName: "type", - }, - modelProperties: { - type: { - serializedName: "type", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeRetrievalIntent: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeRetrievalIntent", - uberParent: "KnowledgeRetrievalIntent", - polymorphicDiscriminator: { - serializedName: "type", - clientName: "type", - }, - modelProperties: { - type: { - serializedName: "type", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeRetrievalReasoningEffort: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeRetrievalReasoningEffort", - uberParent: "KnowledgeRetrievalReasoningEffort", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeSourceParams: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeSourceParams", - uberParent: "KnowledgeSourceParams", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - knowledgeSourceName: { - serializedName: "knowledgeSourceName", - required: true, - type: { - name: "String", - }, - }, - includeReferences: { - serializedName: "includeReferences", - type: { - name: "Boolean", - }, - }, - includeReferenceSourceData: 
{ - serializedName: "includeReferenceSourceData", - type: { - name: "Boolean", - }, - }, - alwaysQuerySource: { - serializedName: "alwaysQuerySource", - type: { - name: "Boolean", - }, - }, - rerankerThreshold: { - serializedName: "rerankerThreshold", - type: { - name: "Number", - }, - }, - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeBaseRetrievalResponse: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeBaseRetrievalResponse", - modelProperties: { - response: { - serializedName: "response", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeBaseMessage", - }, - }, - }, - }, - activity: { - serializedName: "activity", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeBaseActivityRecord", - }, - }, - }, - }, - references: { - serializedName: "references", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeBaseReference", - }, - }, - }, - }, - }, - }, -}; - -export const KnowledgeBaseActivityRecord: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeBaseActivityRecord", - uberParent: "KnowledgeBaseActivityRecord", - polymorphicDiscriminator: { - serializedName: "type", - clientName: "type", - }, - modelProperties: { - id: { - serializedName: "id", - required: true, - type: { - name: "Number", - }, - }, - type: { - serializedName: "type", - required: true, - type: { - name: "String", - }, - }, - elapsedMs: { - serializedName: "elapsedMs", - type: { - name: "Number", - }, - }, - error: { - serializedName: "error", - type: { - name: "Composite", - className: "KnowledgeBaseErrorDetail", - }, - }, - }, - }, -}; - -export const KnowledgeBaseErrorDetail: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeBaseErrorDetail", - modelProperties: { - code: { - serializedName: "code", - readOnly: true, - type: { - name: "String", - }, - }, - message: { - serializedName: "message", - readOnly: true, - type: { - name: "String", - }, - }, - target: { - serializedName: "target", - readOnly: true, - type: { - name: "String", - }, - }, - details: { - serializedName: "details", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeBaseErrorDetail", - }, - }, - }, - }, - additionalInfo: { - serializedName: "additionalInfo", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeBaseErrorAdditionalInfo", - }, - }, - }, - }, - }, - }, -}; - -export const KnowledgeBaseErrorAdditionalInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeBaseErrorAdditionalInfo", - modelProperties: { - type: { - serializedName: "type", - readOnly: true, - type: { - name: "String", - }, - }, - info: { - serializedName: "info", - readOnly: true, - type: { - name: "Dictionary", - value: { type: { name: "any" } }, - }, - }, - }, - }, -}; - -export const KnowledgeBaseReference: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeBaseReference", - uberParent: "KnowledgeBaseReference", - polymorphicDiscriminator: { - serializedName: "type", - clientName: "type", - }, - modelProperties: { - type: { - serializedName: "type", - required: true, - type: { - name: "String", - }, - }, - id: { - serializedName: "id", - required: true, - 
type: { - name: "String", - }, - }, - activitySource: { - serializedName: "activitySource", - required: true, - type: { - name: "Number", - }, - }, - sourceData: { - serializedName: "sourceData", - type: { - name: "Dictionary", - value: { type: { name: "any" } }, - }, - }, - rerankerScore: { - serializedName: "rerankerScore", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const ErrorResponse: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ErrorResponse", - modelProperties: { - error: { - serializedName: "error", - type: { - name: "Composite", - className: "ErrorDetail", - }, - }, - }, - }, -}; - -export const ErrorDetail: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ErrorDetail", - modelProperties: { - code: { - serializedName: "code", - readOnly: true, - type: { - name: "String", - }, - }, - message: { - serializedName: "message", - readOnly: true, - type: { - name: "String", - }, - }, - target: { - serializedName: "target", - readOnly: true, - type: { - name: "String", - }, - }, - details: { - serializedName: "details", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "ErrorDetail", - }, - }, - }, - }, - additionalInfo: { - serializedName: "additionalInfo", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "ErrorAdditionalInfo", - }, - }, - }, - }, - }, - }, -}; - -export const ErrorAdditionalInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ErrorAdditionalInfo", - modelProperties: { - type: { - serializedName: "type", - readOnly: true, - type: { - name: "String", - }, - }, - info: { - serializedName: "info", - readOnly: true, - type: { - name: "Dictionary", - value: { type: { name: "any" } }, - }, - }, - }, - }, -}; - -export const KnowledgeBaseMessageImageContentImage: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "KnowledgeBaseMessageImageContentImage", - modelProperties: { - url: { - serializedName: "url", - required: true, - type: { - name: "String", - }, - }, - }, - }, - }; - -export const KnowledgeBaseSearchIndexActivityArguments: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "KnowledgeBaseSearchIndexActivityArguments", - modelProperties: { - search: { - serializedName: "search", - type: { - name: "String", - }, - }, - filter: { - serializedName: "filter", - type: { - name: "String", - }, - }, - sourceDataFields: { - serializedName: "sourceDataFields", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexFieldReference", - }, - }, - }, - }, - searchFields: { - serializedName: "searchFields", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexFieldReference", - }, - }, - }, - }, - semanticConfigurationName: { - serializedName: "semanticConfigurationName", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const SearchIndexFieldReference: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexFieldReference", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeBaseAzureBlobActivityArguments: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "KnowledgeBaseAzureBlobActivityArguments", - modelProperties: { - search: { - serializedName: "search", - 
type: { - name: "String", - }, - }, - }, - }, - }; - -export const KnowledgeBaseIndexedSharePointActivityArguments: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "KnowledgeBaseIndexedSharePointActivityArguments", - modelProperties: { - search: { - serializedName: "search", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const KnowledgeBaseIndexedOneLakeActivityArguments: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "KnowledgeBaseIndexedOneLakeActivityArguments", - modelProperties: { - search: { - serializedName: "search", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const KnowledgeBaseWebActivityArguments: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeBaseWebActivityArguments", - modelProperties: { - search: { - serializedName: "search", - type: { - name: "String", - }, - }, - language: { - serializedName: "language", - type: { - name: "String", - }, - }, - market: { - serializedName: "market", - type: { - name: "String", - }, - }, - count: { - serializedName: "count", - type: { - name: "Number", - }, - }, - freshness: { - serializedName: "freshness", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeBaseRemoteSharePointActivityArguments: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "KnowledgeBaseRemoteSharePointActivityArguments", - modelProperties: { - search: { - serializedName: "search", - type: { - name: "String", - }, - }, - filterExpressionAddOn: { - serializedName: "filterExpressionAddOn", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const SharePointSensitivityLabelInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SharePointSensitivityLabelInfo", - modelProperties: { - displayName: { - serializedName: "displayName", - type: { - name: "String", - }, - }, - sensitivityLabelId: { - serializedName: "sensitivityLabelId", - type: { - name: "String", - }, - }, - tooltip: { - serializedName: "tooltip", - type: { - name: "String", - }, - }, - priority: { - serializedName: "priority", - type: { - name: "Number", - }, - }, - color: { - serializedName: "color", - type: { - name: "String", - }, - }, - isEncrypted: { - serializedName: "isEncrypted", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const KnowledgeBaseMessageTextContent: coreClient.CompositeMapper = { - serializedName: "text", - type: { - name: "Composite", - className: "KnowledgeBaseMessageTextContent", - uberParent: "KnowledgeBaseMessageContent", - polymorphicDiscriminator: - KnowledgeBaseMessageContent.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseMessageContent.type.modelProperties, - text: { - serializedName: "text", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeBaseMessageImageContent: coreClient.CompositeMapper = { - serializedName: "image", - type: { - name: "Composite", - className: "KnowledgeBaseMessageImageContent", - uberParent: "KnowledgeBaseMessageContent", - polymorphicDiscriminator: - KnowledgeBaseMessageContent.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseMessageContent.type.modelProperties, - image: { - serializedName: "image", - type: { - name: "Composite", - className: "KnowledgeBaseMessageImageContentImage", - }, - }, - }, - }, -}; - -export const KnowledgeRetrievalSemanticIntent: coreClient.CompositeMapper = { - serializedName: "semantic", - type: { - name: 
"Composite", - className: "KnowledgeRetrievalSemanticIntent", - uberParent: "KnowledgeRetrievalIntent", - polymorphicDiscriminator: - KnowledgeRetrievalIntent.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeRetrievalIntent.type.modelProperties, - search: { - serializedName: "search", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeRetrievalMinimalReasoningEffort: coreClient.CompositeMapper = - { - serializedName: "minimal", - type: { - name: "Composite", - className: "KnowledgeRetrievalMinimalReasoningEffort", - uberParent: "KnowledgeRetrievalReasoningEffort", - polymorphicDiscriminator: - KnowledgeRetrievalReasoningEffort.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeRetrievalReasoningEffort.type.modelProperties, - }, - }, - }; - -export const KnowledgeRetrievalLowReasoningEffort: coreClient.CompositeMapper = - { - serializedName: "low", - type: { - name: "Composite", - className: "KnowledgeRetrievalLowReasoningEffort", - uberParent: "KnowledgeRetrievalReasoningEffort", - polymorphicDiscriminator: - KnowledgeRetrievalReasoningEffort.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeRetrievalReasoningEffort.type.modelProperties, - }, - }, - }; - -export const KnowledgeRetrievalMediumReasoningEffort: coreClient.CompositeMapper = - { - serializedName: "medium", - type: { - name: "Composite", - className: "KnowledgeRetrievalMediumReasoningEffort", - uberParent: "KnowledgeRetrievalReasoningEffort", - polymorphicDiscriminator: - KnowledgeRetrievalReasoningEffort.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeRetrievalReasoningEffort.type.modelProperties, - }, - }, - }; - -export const SearchIndexKnowledgeSourceParams: coreClient.CompositeMapper = { - serializedName: "searchIndex", - type: { - name: "Composite", - className: "SearchIndexKnowledgeSourceParams", - uberParent: "KnowledgeSourceParams", - polymorphicDiscriminator: - KnowledgeSourceParams.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeSourceParams.type.modelProperties, - filterAddOn: { - serializedName: "filterAddOn", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const AzureBlobKnowledgeSourceParams: coreClient.CompositeMapper = { - serializedName: "azureBlob", - type: { - name: "Composite", - className: "AzureBlobKnowledgeSourceParams", - uberParent: "KnowledgeSourceParams", - polymorphicDiscriminator: - KnowledgeSourceParams.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeSourceParams.type.modelProperties, - }, - }, -}; - -export const IndexedSharePointKnowledgeSourceParams: coreClient.CompositeMapper = - { - serializedName: "indexedSharePoint", - type: { - name: "Composite", - className: "IndexedSharePointKnowledgeSourceParams", - uberParent: "KnowledgeSourceParams", - polymorphicDiscriminator: - KnowledgeSourceParams.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeSourceParams.type.modelProperties, - }, - }, - }; - -export const IndexedOneLakeKnowledgeSourceParams: coreClient.CompositeMapper = { - serializedName: "indexedOneLake", - type: { - name: "Composite", - className: "IndexedOneLakeKnowledgeSourceParams", - uberParent: "KnowledgeSourceParams", - polymorphicDiscriminator: - KnowledgeSourceParams.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeSourceParams.type.modelProperties, - }, - }, -}; - -export const WebKnowledgeSourceParams: coreClient.CompositeMapper = { - serializedName: "web", - type: { - name: "Composite", - 
className: "WebKnowledgeSourceParams", - uberParent: "KnowledgeSourceParams", - polymorphicDiscriminator: - KnowledgeSourceParams.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeSourceParams.type.modelProperties, - language: { - serializedName: "language", - type: { - name: "String", - }, - }, - market: { - serializedName: "market", - type: { - name: "String", - }, - }, - count: { - serializedName: "count", - type: { - name: "Number", - }, - }, - freshness: { - serializedName: "freshness", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const RemoteSharePointKnowledgeSourceParams: coreClient.CompositeMapper = - { - serializedName: "remoteSharePoint", - type: { - name: "Composite", - className: "RemoteSharePointKnowledgeSourceParams", - uberParent: "KnowledgeSourceParams", - polymorphicDiscriminator: - KnowledgeSourceParams.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeSourceParams.type.modelProperties, - filterExpressionAddOn: { - serializedName: "filterExpressionAddOn", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const KnowledgeBaseRetrievalActivityRecord: coreClient.CompositeMapper = - { - serializedName: "KnowledgeBaseRetrievalActivityRecord", - type: { - name: "Composite", - className: "KnowledgeBaseRetrievalActivityRecord", - uberParent: "KnowledgeBaseActivityRecord", - polymorphicDiscriminator: { - serializedName: "type", - clientName: "type", - }, - modelProperties: { - ...KnowledgeBaseActivityRecord.type.modelProperties, - knowledgeSourceName: { - serializedName: "knowledgeSourceName", - type: { - name: "String", - }, - }, - queryTime: { - serializedName: "queryTime", - type: { - name: "DateTime", - }, - }, - count: { - serializedName: "count", - type: { - name: "Number", - }, - }, - }, - }, - }; - -export const KnowledgeBaseModelQueryPlanningActivityRecord: coreClient.CompositeMapper = - { - serializedName: "modelQueryPlanning", - type: { - name: "Composite", - className: "KnowledgeBaseModelQueryPlanningActivityRecord", - uberParent: "KnowledgeBaseActivityRecord", - polymorphicDiscriminator: - KnowledgeBaseActivityRecord.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseActivityRecord.type.modelProperties, - inputTokens: { - serializedName: "inputTokens", - type: { - name: "Number", - }, - }, - outputTokens: { - serializedName: "outputTokens", - type: { - name: "Number", - }, - }, - }, - }, - }; - -export const KnowledgeBaseModelAnswerSynthesisActivityRecord: coreClient.CompositeMapper = - { - serializedName: "modelAnswerSynthesis", - type: { - name: "Composite", - className: "KnowledgeBaseModelAnswerSynthesisActivityRecord", - uberParent: "KnowledgeBaseActivityRecord", - polymorphicDiscriminator: - KnowledgeBaseActivityRecord.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseActivityRecord.type.modelProperties, - inputTokens: { - serializedName: "inputTokens", - type: { - name: "Number", - }, - }, - outputTokens: { - serializedName: "outputTokens", - type: { - name: "Number", - }, - }, - }, - }, - }; - -export const KnowledgeBaseAgenticReasoningActivityRecord: coreClient.CompositeMapper = - { - serializedName: "agenticReasoning", - type: { - name: "Composite", - className: "KnowledgeBaseAgenticReasoningActivityRecord", - uberParent: "KnowledgeBaseActivityRecord", - polymorphicDiscriminator: - KnowledgeBaseActivityRecord.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseActivityRecord.type.modelProperties, - reasoningTokens: { - serializedName: 
"reasoningTokens", - type: { - name: "Number", - }, - }, - retrievalReasoningEffort: { - serializedName: "retrievalReasoningEffort", - type: { - name: "Composite", - className: "KnowledgeRetrievalReasoningEffort", - }, - }, - }, - }, - }; - -export const KnowledgeBaseSearchIndexReference: coreClient.CompositeMapper = { - serializedName: "searchIndex", - type: { - name: "Composite", - className: "KnowledgeBaseSearchIndexReference", - uberParent: "KnowledgeBaseReference", - polymorphicDiscriminator: - KnowledgeBaseReference.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseReference.type.modelProperties, - docKey: { - serializedName: "docKey", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeBaseAzureBlobReference: coreClient.CompositeMapper = { - serializedName: "azureBlob", - type: { - name: "Composite", - className: "KnowledgeBaseAzureBlobReference", - uberParent: "KnowledgeBaseReference", - polymorphicDiscriminator: - KnowledgeBaseReference.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseReference.type.modelProperties, - blobUrl: { - serializedName: "blobUrl", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeBaseIndexedSharePointReference: coreClient.CompositeMapper = - { - serializedName: "indexedSharePoint", - type: { - name: "Composite", - className: "KnowledgeBaseIndexedSharePointReference", - uberParent: "KnowledgeBaseReference", - polymorphicDiscriminator: - KnowledgeBaseReference.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseReference.type.modelProperties, - docUrl: { - serializedName: "docUrl", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const KnowledgeBaseIndexedOneLakeReference: coreClient.CompositeMapper = - { - serializedName: "indexedOneLake", - type: { - name: "Composite", - className: "KnowledgeBaseIndexedOneLakeReference", - uberParent: "KnowledgeBaseReference", - polymorphicDiscriminator: - KnowledgeBaseReference.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseReference.type.modelProperties, - docUrl: { - serializedName: "docUrl", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const KnowledgeBaseWebReference: coreClient.CompositeMapper = { - serializedName: "web", - type: { - name: "Composite", - className: "KnowledgeBaseWebReference", - uberParent: "KnowledgeBaseReference", - polymorphicDiscriminator: - KnowledgeBaseReference.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseReference.type.modelProperties, - url: { - serializedName: "url", - type: { - name: "String", - }, - }, - title: { - serializedName: "title", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeBaseRemoteSharePointReference: coreClient.CompositeMapper = - { - serializedName: "remoteSharePoint", - type: { - name: "Composite", - className: "KnowledgeBaseRemoteSharePointReference", - uberParent: "KnowledgeBaseReference", - polymorphicDiscriminator: - KnowledgeBaseReference.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseReference.type.modelProperties, - webUrl: { - serializedName: "webUrl", - type: { - name: "String", - }, - }, - searchSensitivityLabelInfo: { - serializedName: "searchSensitivityLabelInfo", - type: { - name: "Composite", - className: "SharePointSensitivityLabelInfo", - }, - }, - }, - }, - }; - -export const KnowledgeBaseSearchIndexActivityRecord: coreClient.CompositeMapper = - { - serializedName: "searchIndex", - type: { - name: "Composite", - 
className: "KnowledgeBaseSearchIndexActivityRecord", - uberParent: "KnowledgeBaseRetrievalActivityRecord", - polymorphicDiscriminator: - KnowledgeBaseRetrievalActivityRecord.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseRetrievalActivityRecord.type.modelProperties, - searchIndexArguments: { - serializedName: "searchIndexArguments", - type: { - name: "Composite", - className: "KnowledgeBaseSearchIndexActivityArguments", - }, - }, - }, - }, - }; - -export const KnowledgeBaseAzureBlobActivityRecord: coreClient.CompositeMapper = - { - serializedName: "azureBlob", - type: { - name: "Composite", - className: "KnowledgeBaseAzureBlobActivityRecord", - uberParent: "KnowledgeBaseRetrievalActivityRecord", - polymorphicDiscriminator: - KnowledgeBaseRetrievalActivityRecord.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseRetrievalActivityRecord.type.modelProperties, - azureBlobArguments: { - serializedName: "azureBlobArguments", - type: { - name: "Composite", - className: "KnowledgeBaseAzureBlobActivityArguments", - }, - }, - }, - }, - }; - -export const KnowledgeBaseIndexedSharePointActivityRecord: coreClient.CompositeMapper = - { - serializedName: "indexedSharePoint", - type: { - name: "Composite", - className: "KnowledgeBaseIndexedSharePointActivityRecord", - uberParent: "KnowledgeBaseRetrievalActivityRecord", - polymorphicDiscriminator: - KnowledgeBaseRetrievalActivityRecord.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseRetrievalActivityRecord.type.modelProperties, - indexedSharePointArguments: { - serializedName: "indexedSharePointArguments", - type: { - name: "Composite", - className: "KnowledgeBaseIndexedSharePointActivityArguments", - }, - }, - }, - }, - }; - -export const KnowledgeBaseIndexedOneLakeActivityRecord: coreClient.CompositeMapper = - { - serializedName: "indexedOneLake", - type: { - name: "Composite", - className: "KnowledgeBaseIndexedOneLakeActivityRecord", - uberParent: "KnowledgeBaseRetrievalActivityRecord", - polymorphicDiscriminator: - KnowledgeBaseRetrievalActivityRecord.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseRetrievalActivityRecord.type.modelProperties, - indexedOneLakeArguments: { - serializedName: "indexedOneLakeArguments", - type: { - name: "Composite", - className: "KnowledgeBaseIndexedOneLakeActivityArguments", - }, - }, - }, - }, - }; - -export const KnowledgeBaseWebActivityRecord: coreClient.CompositeMapper = { - serializedName: "web", - type: { - name: "Composite", - className: "KnowledgeBaseWebActivityRecord", - uberParent: "KnowledgeBaseRetrievalActivityRecord", - polymorphicDiscriminator: - KnowledgeBaseRetrievalActivityRecord.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseRetrievalActivityRecord.type.modelProperties, - webArguments: { - serializedName: "webArguments", - type: { - name: "Composite", - className: "KnowledgeBaseWebActivityArguments", - }, - }, - }, - }, -}; - -export const KnowledgeBaseRemoteSharePointActivityRecord: coreClient.CompositeMapper = - { - serializedName: "remoteSharePoint", - type: { - name: "Composite", - className: "KnowledgeBaseRemoteSharePointActivityRecord", - uberParent: "KnowledgeBaseRetrievalActivityRecord", - polymorphicDiscriminator: - KnowledgeBaseRetrievalActivityRecord.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeBaseRetrievalActivityRecord.type.modelProperties, - remoteSharePointArguments: { - serializedName: "remoteSharePointArguments", - type: { - name: "Composite", - className: 
"KnowledgeBaseRemoteSharePointActivityArguments", - }, - }, - }, - }, - }; - -export let discriminators = { - KnowledgeBaseMessageContent: KnowledgeBaseMessageContent, - KnowledgeRetrievalIntent: KnowledgeRetrievalIntent, - KnowledgeRetrievalReasoningEffort: KnowledgeRetrievalReasoningEffort, - KnowledgeSourceParams: KnowledgeSourceParams, - KnowledgeBaseActivityRecord: KnowledgeBaseActivityRecord, - KnowledgeBaseReference: KnowledgeBaseReference, - "KnowledgeBaseMessageContent.text": KnowledgeBaseMessageTextContent, - "KnowledgeBaseMessageContent.image": KnowledgeBaseMessageImageContent, - "KnowledgeRetrievalIntent.semantic": KnowledgeRetrievalSemanticIntent, - "KnowledgeRetrievalReasoningEffort.minimal": - KnowledgeRetrievalMinimalReasoningEffort, - "KnowledgeRetrievalReasoningEffort.low": KnowledgeRetrievalLowReasoningEffort, - "KnowledgeRetrievalReasoningEffort.medium": - KnowledgeRetrievalMediumReasoningEffort, - "KnowledgeSourceParams.searchIndex": SearchIndexKnowledgeSourceParams, - "KnowledgeSourceParams.azureBlob": AzureBlobKnowledgeSourceParams, - "KnowledgeSourceParams.indexedSharePoint": - IndexedSharePointKnowledgeSourceParams, - "KnowledgeSourceParams.indexedOneLake": IndexedOneLakeKnowledgeSourceParams, - "KnowledgeSourceParams.web": WebKnowledgeSourceParams, - "KnowledgeSourceParams.remoteSharePoint": - RemoteSharePointKnowledgeSourceParams, - "KnowledgeBaseActivityRecord.KnowledgeBaseRetrievalActivityRecord": - KnowledgeBaseRetrievalActivityRecord, - "KnowledgeBaseActivityRecord.modelQueryPlanning": - KnowledgeBaseModelQueryPlanningActivityRecord, - "KnowledgeBaseActivityRecord.modelAnswerSynthesis": - KnowledgeBaseModelAnswerSynthesisActivityRecord, - "KnowledgeBaseActivityRecord.agenticReasoning": - KnowledgeBaseAgenticReasoningActivityRecord, - "KnowledgeBaseReference.searchIndex": KnowledgeBaseSearchIndexReference, - "KnowledgeBaseReference.azureBlob": KnowledgeBaseAzureBlobReference, - "KnowledgeBaseReference.indexedSharePoint": - KnowledgeBaseIndexedSharePointReference, - "KnowledgeBaseReference.indexedOneLake": KnowledgeBaseIndexedOneLakeReference, - "KnowledgeBaseReference.web": KnowledgeBaseWebReference, - "KnowledgeBaseReference.remoteSharePoint": - KnowledgeBaseRemoteSharePointReference, - "KnowledgeBaseRetrievalActivityRecord.searchIndex": - KnowledgeBaseSearchIndexActivityRecord, - "KnowledgeBaseRetrievalActivityRecord.azureBlob": - KnowledgeBaseAzureBlobActivityRecord, - "KnowledgeBaseRetrievalActivityRecord.indexedSharePoint": - KnowledgeBaseIndexedSharePointActivityRecord, - "KnowledgeBaseRetrievalActivityRecord.indexedOneLake": - KnowledgeBaseIndexedOneLakeActivityRecord, - "KnowledgeBaseRetrievalActivityRecord.web": KnowledgeBaseWebActivityRecord, - "KnowledgeBaseRetrievalActivityRecord.remoteSharePoint": - KnowledgeBaseRemoteSharePointActivityRecord, -}; diff --git a/sdk/search/search-documents/src/generated/knowledgeBase/models/parameters.ts b/sdk/search/search-documents/src/generated/knowledgeBase/models/parameters.ts deleted file mode 100644 index b542bf6c996e..000000000000 --- a/sdk/search/search-documents/src/generated/knowledgeBase/models/parameters.ts +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- */ - -import { - OperationParameter, - OperationURLParameter, - OperationQueryParameter, -} from "@azure/core-client"; -import { KnowledgeBaseRetrievalRequest as KnowledgeBaseRetrievalRequestMapper } from "../models/mappers.js"; - -export const contentType: OperationParameter = { - parameterPath: ["options", "contentType"], - mapper: { - defaultValue: "application/json", - isConstant: true, - serializedName: "Content-Type", - type: { - name: "String", - }, - }, -}; - -export const retrievalRequest: OperationParameter = { - parameterPath: "retrievalRequest", - mapper: KnowledgeBaseRetrievalRequestMapper, -}; - -export const accept: OperationParameter = { - parameterPath: "accept", - mapper: { - defaultValue: "application/json", - isConstant: true, - serializedName: "Accept", - type: { - name: "String", - }, - }, -}; - -export const endpoint: OperationURLParameter = { - parameterPath: "endpoint", - mapper: { - serializedName: "endpoint", - required: true, - type: { - name: "String", - }, - }, - skipEncoding: true, -}; - -export const knowledgeBaseName: OperationURLParameter = { - parameterPath: "knowledgeBaseName", - mapper: { - serializedName: "knowledgeBaseName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const apiVersion: OperationQueryParameter = { - parameterPath: "apiVersion", - mapper: { - serializedName: "api-version", - required: true, - type: { - name: "String", - }, - }, -}; - -export const xMsQuerySourceAuthorization: OperationParameter = { - parameterPath: ["options", "xMsQuerySourceAuthorization"], - mapper: { - serializedName: "x-ms-query-source-authorization", - type: { - name: "String", - }, - }, -}; diff --git a/sdk/search/search-documents/src/generated/knowledgeBase/operations/index.ts b/sdk/search/search-documents/src/generated/knowledgeBase/operations/index.ts deleted file mode 100644 index 109c3f0a8776..000000000000 --- a/sdk/search/search-documents/src/generated/knowledgeBase/operations/index.ts +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -export * from "./knowledgeRetrieval.js"; diff --git a/sdk/search/search-documents/src/generated/knowledgeBase/operations/knowledgeRetrieval.ts b/sdk/search/search-documents/src/generated/knowledgeBase/operations/knowledgeRetrieval.ts deleted file mode 100644 index 1540d3d4ca7c..000000000000 --- a/sdk/search/search-documents/src/generated/knowledgeBase/operations/knowledgeRetrieval.ts +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import { KnowledgeRetrieval } from "../operationsInterfaces/index.js"; -import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchClient } from "../searchClient.js"; -import { - KnowledgeBaseRetrievalRequest, - KnowledgeRetrievalRetrieveOptionalParams, - KnowledgeRetrievalRetrieveResponse, -} from "../models/index.js"; - -/** Class containing KnowledgeRetrieval operations. 
*/ -export class KnowledgeRetrievalImpl implements KnowledgeRetrieval { - private readonly client: SearchClient; - - /** - * Initialize a new instance of the KnowledgeRetrieval class. - * @param client Reference to the service client - */ - constructor(client: SearchClient) { - this.client = client; - } - - /** - * KnowledgeBase retrieves relevant data from backing stores. - * @param retrievalRequest The retrieval request to process. - * @param options The options parameters. - */ - retrieve( - retrievalRequest: KnowledgeBaseRetrievalRequest, - options?: KnowledgeRetrievalRetrieveOptionalParams, - ): Promise<KnowledgeRetrievalRetrieveResponse> { - return this.client.sendOperationRequest( - { retrievalRequest, options }, - retrieveOperationSpec, - ); - } -} -// Operation Specifications -const serializer = coreClient.createSerializer(Mappers, /* isXml */ false); - -const retrieveOperationSpec: coreClient.OperationSpec = { - path: "/retrieve", - httpMethod: "POST", - responses: { - 200: { - bodyMapper: Mappers.KnowledgeBaseRetrievalResponse, - }, - 206: { - bodyMapper: Mappers.KnowledgeBaseRetrievalResponse, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.retrievalRequest, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.knowledgeBaseName], - headerParameters: [ - Parameters.contentType, - Parameters.accept, - Parameters.xMsQuerySourceAuthorization, - ], - mediaType: "json", - serializer, -}; diff --git a/sdk/search/search-documents/src/generated/knowledgeBase/operationsInterfaces/index.ts b/sdk/search/search-documents/src/generated/knowledgeBase/operationsInterfaces/index.ts deleted file mode 100644 index 109c3f0a8776..000000000000 --- a/sdk/search/search-documents/src/generated/knowledgeBase/operationsInterfaces/index.ts +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -export * from "./knowledgeRetrieval.js"; diff --git a/sdk/search/search-documents/src/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.ts b/sdk/search/search-documents/src/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.ts deleted file mode 100644 index 5914e36d79b0..000000000000 --- a/sdk/search/search-documents/src/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.ts +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import { - KnowledgeBaseRetrievalRequest, - KnowledgeRetrievalRetrieveOptionalParams, - KnowledgeRetrievalRetrieveResponse, -} from "../models/index.js"; - -/** Interface representing a KnowledgeRetrieval. */ -export interface KnowledgeRetrieval { - /** - * KnowledgeBase retrieves relevant data from backing stores. - * @param retrievalRequest The retrieval request to process. - * @param options The options parameters.
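For context, a rough sketch of how this now-removed surface was driven: construct the internal SearchClient (its definition is deleted below in searchClient.ts) and call retrieve on the knowledgeRetrieval group. The endpoint, base name, and api-version literal are placeholders:

import { SearchClient } from "./searchClient.js"; // deleted below in this diff
import type { KnowledgeBaseRetrievalRequest } from "./models/index.js";

async function retrieveExample(): Promise<void> {
  const client = new SearchClient(
    "https://<service-name>.search.windows.net", // placeholder endpoint
    "<knowledge-base-name>", // placeholder
    "2025-11-01-preview", // assumed ApiVersion20251101Preview literal
  );
  const response = await client.knowledgeRetrieval.retrieve(
    {} as KnowledgeBaseRetrievalRequest, // request body shape omitted here
  );
  console.log(response);
}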
- */ - retrieve( - retrievalRequest: KnowledgeBaseRetrievalRequest, - options?: KnowledgeRetrievalRetrieveOptionalParams, - ): Promise<KnowledgeRetrievalRetrieveResponse>; -} diff --git a/sdk/search/search-documents/src/generated/knowledgeBase/searchClient.ts b/sdk/search/search-documents/src/generated/knowledgeBase/searchClient.ts deleted file mode 100644 index 8559344bfe4c..000000000000 --- a/sdk/search/search-documents/src/generated/knowledgeBase/searchClient.ts +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import * as coreHttpCompat from "@azure/core-http-compat"; -import { - PipelineRequest, - PipelineResponse, - SendRequest, -} from "@azure/core-rest-pipeline"; -import { KnowledgeRetrievalImpl } from "./operations/index.js"; -import { KnowledgeRetrieval } from "./operationsInterfaces/index.js"; -import { - ApiVersion20251101Preview, - SearchClientOptionalParams, -} from "./models/index.js"; - -/** @internal */ -export class SearchClient extends coreHttpCompat.ExtendedServiceClient { - endpoint: string; - knowledgeBaseName: string; - apiVersion: ApiVersion20251101Preview; - - /** - * Initializes a new instance of the SearchClient class. - * @param endpoint The endpoint URL of the search service. - * @param knowledgeBaseName The name of the knowledge base. - * @param apiVersion Api Version - * @param options The parameter options - */ - constructor( - endpoint: string, - knowledgeBaseName: string, - apiVersion: ApiVersion20251101Preview, - options?: SearchClientOptionalParams, - ) { - if (endpoint === undefined) { - throw new Error("'endpoint' cannot be null"); - } - if (knowledgeBaseName === undefined) { - throw new Error("'knowledgeBaseName' cannot be null"); - } - if (apiVersion === undefined) { - throw new Error("'apiVersion' cannot be null"); - } - - // Initializing default values for options - if (!options) { - options = {}; - } - const defaults: SearchClientOptionalParams = { - requestContentType: "application/json; charset=utf-8", - }; - - const packageDetails = `azsdk-js-search-documents/12.3.0-beta.1`; - const userAgentPrefix = - options.userAgentOptions && options.userAgentOptions.userAgentPrefix - ? `${options.userAgentOptions.userAgentPrefix} ${packageDetails}` - : `${packageDetails}`; - - const optionsWithDefaults = { - ...defaults, - ...options, - userAgentOptions: { - userAgentPrefix, - }, - endpoint: - options.endpoint ?? - options.baseUri ?? - "{endpoint}/knowledgebases('{knowledgeBaseName}')", - }; - super(optionsWithDefaults); - // Parameter assignments - this.endpoint = endpoint; - this.knowledgeBaseName = knowledgeBaseName; - this.apiVersion = apiVersion; - this.knowledgeRetrieval = new KnowledgeRetrievalImpl(this); - this.addCustomApiVersionPolicy(apiVersion); - } - - /** A function that adds a policy that sets the api-version (or equivalent) to reflect the library version.
*/ - private addCustomApiVersionPolicy(apiVersion?: string) { - if (!apiVersion) { - return; - } - const apiVersionPolicy = { - name: "CustomApiVersionPolicy", - async sendRequest( - request: PipelineRequest, - next: SendRequest, - ): Promise<PipelineResponse> { - const param = request.url.split("?"); - if (param.length > 1) { - const newParams = param[1].split("&").map((item) => { - if (item.indexOf("api-version") > -1) { - return "api-version=" + apiVersion; - } else { - return item; - } - }); - request.url = param[0] + "?" + newParams.join("&"); - } - return next(request); - }, - }; - this.pipeline.addPolicy(apiVersionPolicy); - } - - knowledgeRetrieval: KnowledgeRetrieval; -} diff --git a/sdk/search/search-documents/src/generated/service/index.ts b/sdk/search/search-documents/src/generated/service/index.ts deleted file mode 100644 index 275645f04104..000000000000 --- a/sdk/search/search-documents/src/generated/service/index.ts +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -export * from "./models/index.js"; -export { SearchServiceClient } from "./searchServiceClient.js"; -export * from "./operationsInterfaces/index.js"; diff --git a/sdk/search/search-documents/src/generated/service/models/index.ts b/sdk/search/search-documents/src/generated/service/models/index.ts deleted file mode 100644 index e59e88c66feb..000000000000 --- a/sdk/search/search-documents/src/generated/service/models/index.ts +++ /dev/null @@ -1,6605 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated.
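The deleted policy above rewrites the api-version query parameter by splitting the URL by hand. The same idea can be expressed with the standard URL API; this is an alternative sketch, not the SDK's implementation:

import type {
  PipelinePolicy,
  PipelineRequest,
  PipelineResponse,
  SendRequest,
} from "@azure/core-rest-pipeline";

// Rewrites an existing api-version query parameter using URL/URLSearchParams
// instead of manual string splitting.
function customApiVersionPolicy(apiVersion: string): PipelinePolicy {
  return {
    name: "CustomApiVersionPolicy",
    async sendRequest(
      request: PipelineRequest,
      next: SendRequest,
    ): Promise<PipelineResponse> {
      const url = new URL(request.url);
      if (url.searchParams.has("api-version")) {
        url.searchParams.set("api-version", apiVersion);
        request.url = url.toString();
      }
      return next(request);
    },
  };
}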
- */ - -import * as coreClient from "@azure/core-client"; -import * as coreHttpCompat from "@azure/core-http-compat"; - -export type KnowledgeBaseModelUnion = - | KnowledgeBaseModel - | KnowledgeBaseAzureOpenAIModel; -export type KnowledgeRetrievalReasoningEffortUnion = - | KnowledgeRetrievalReasoningEffort - | KnowledgeRetrievalMinimalReasoningEffort - | KnowledgeRetrievalLowReasoningEffort - | KnowledgeRetrievalMediumReasoningEffort; -export type SearchIndexerDataIdentityUnion = - | SearchIndexerDataIdentity - | SearchIndexerDataNoneIdentity - | SearchIndexerDataUserAssignedIdentity; -export type KnowledgeSourceUnion = - | KnowledgeSource - | SearchIndexKnowledgeSource - | AzureBlobKnowledgeSource - | IndexedSharePointKnowledgeSource - | IndexedOneLakeKnowledgeSource - | WebKnowledgeSource - | RemoteSharePointKnowledgeSource; -export type DataChangeDetectionPolicyUnion = - | DataChangeDetectionPolicy - | HighWaterMarkChangeDetectionPolicy - | SqlIntegratedChangeTrackingPolicy; -export type DataDeletionDetectionPolicyUnion = - | DataDeletionDetectionPolicy - | SoftDeleteColumnDeletionDetectionPolicy - | NativeBlobSoftDeleteDeletionDetectionPolicy; -export type SearchIndexerSkillUnion = - | SearchIndexerSkill - | ConditionalSkill - | KeyPhraseExtractionSkill - | OcrSkill - | ImageAnalysisSkill - | LanguageDetectionSkill - | ShaperSkill - | MergeSkill - | EntityRecognitionSkill - | SentimentSkill - | SentimentSkillV3 - | EntityLinkingSkill - | EntityRecognitionSkillV3 - | PIIDetectionSkill - | SplitSkill - | CustomEntityLookupSkill - | TextTranslationSkill - | DocumentExtractionSkill - | DocumentIntelligenceLayoutSkill - | WebApiSkillUnion - | ContentUnderstandingSkill - | AzureMachineLearningSkill - | AzureOpenAIEmbeddingSkill - | VisionVectorizeSkill; -export type CognitiveServicesAccountUnion = - | CognitiveServicesAccount - | DefaultCognitiveServicesAccount - | CognitiveServicesAccountKey - | AIServicesAccountKey - | AIServicesAccountIdentity; -export type ScoringFunctionUnion = - | ScoringFunction - | DistanceScoringFunction - | FreshnessScoringFunction - | MagnitudeScoringFunction - | TagScoringFunction; -export type LexicalAnalyzerUnion = - | LexicalAnalyzer - | CustomAnalyzer - | PatternAnalyzer - | LuceneStandardAnalyzer - | StopAnalyzer; -export type LexicalTokenizerUnion = - | LexicalTokenizer - | ClassicTokenizer - | EdgeNGramTokenizer - | KeywordTokenizer - | KeywordTokenizerV2 - | MicrosoftLanguageTokenizer - | MicrosoftLanguageStemmingTokenizer - | NGramTokenizer - | PathHierarchyTokenizerV2 - | PatternTokenizer - | LuceneStandardTokenizer - | LuceneStandardTokenizerV2 - | UaxUrlEmailTokenizer; -export type TokenFilterUnion = - | TokenFilter - | AsciiFoldingTokenFilter - | CjkBigramTokenFilter - | CommonGramTokenFilter - | DictionaryDecompounderTokenFilter - | EdgeNGramTokenFilter - | EdgeNGramTokenFilterV2 - | ElisionTokenFilter - | KeepTokenFilter - | KeywordMarkerTokenFilter - | LengthTokenFilter - | LimitTokenFilter - | NGramTokenFilter - | NGramTokenFilterV2 - | PatternCaptureTokenFilter - | PatternReplaceTokenFilter - | PhoneticTokenFilter - | ShingleTokenFilter - | SnowballTokenFilter - | StemmerTokenFilter - | StemmerOverrideTokenFilter - | StopwordsTokenFilter - | SynonymTokenFilter - | TruncateTokenFilter - | UniqueTokenFilter - | WordDelimiterTokenFilter; -export type CharFilterUnion = - | CharFilter - | MappingCharFilter - | PatternReplaceCharFilter; -export type LexicalNormalizerUnion = LexicalNormalizer | CustomNormalizer; -export type SimilarityUnion = 
Similarity | ClassicSimilarity | BM25Similarity; -export type VectorSearchAlgorithmConfigurationUnion = - | VectorSearchAlgorithmConfiguration - | HnswAlgorithmConfiguration - | ExhaustiveKnnAlgorithmConfiguration; -export type VectorSearchVectorizerUnion = - | VectorSearchVectorizer - | AzureOpenAIVectorizer - | WebApiVectorizer - | AIServicesVisionVectorizer - | AMLVectorizer; -export type VectorSearchCompressionUnion = - | VectorSearchCompression - | ScalarQuantizationCompression - | BinaryQuantizationCompression; -export type KnowledgeSourceVectorizerUnion = - | KnowledgeSourceVectorizer - | KnowledgeSourceAzureOpenAIVectorizer; -export type WebApiSkillUnion = WebApiSkill | ChatCompletionSkill; - -export interface KnowledgeBase { - /** The name of the knowledge base. */ - name: string; - knowledgeSources: KnowledgeSourceReference[]; - /** Contains configuration options on how to connect to AI models. */ - models: KnowledgeBaseModelUnion[]; - retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion; - /** The output configuration for this retrieval. */ - outputMode?: KnowledgeRetrievalOutputMode; - /** The ETag of the knowledge base. */ - etag?: string; - /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your knowledge base definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your knowledge base definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your knowledge base definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; - /** The description of the knowledge base. */ - description?: string; - /** Instructions considered by the knowledge base when developing a query plan. */ - retrievalInstructions?: string; - /** Instructions considered by the knowledge base when generating answers. */ - answerInstructions?: string; -} - -export interface KnowledgeSourceReference { - /** The name of the knowledge source. */ - name: string; -} - -/** Specifies the connection parameters for the model to use for query planning. */ -export interface KnowledgeBaseModel { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "azureOpenAI"; -} - -export interface KnowledgeRetrievalReasoningEffort { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "minimal" | "low" | "medium"; -} - -/** A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest, such as indexes and synonym maps. */ -export interface SearchResourceEncryptionKey { - /** The name of your Azure Key Vault key to be used to encrypt your data at rest. */ - keyName: string; - /** The version of your Azure Key Vault key to be used to encrypt your data at rest. */ - keyVersion?: string; - /** The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be `https://my-keyvault-name.vault.azure.net`.
- */ - vaultUri: string; - /** Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. */ - accessCredentials?: AzureActiveDirectoryApplicationCredentials; - /** An explicit managed identity to use for this encryption key. If not specified and the access credentials property is null, the system-assigned managed identity is used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. If "none" is specified, the value of this property is cleared. */ - identity?: SearchIndexerDataIdentityUnion; -} - -/** Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. */ -export interface AzureActiveDirectoryApplicationCredentials { - /** An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. */ - applicationId: string; - /** The authentication key of the specified AAD application. */ - applicationSecret?: string; -} - -/** Abstract base type for data identities. */ -export interface SearchIndexerDataIdentity { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Azure.Search.DataNoneIdentity" - | "#Microsoft.Azure.Search.DataUserAssignedIdentity"; -} - -/** Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). */ -export interface ErrorResponse { - /** The error object. */ - error?: ErrorDetail; -} - -/** The error detail. */ -export interface ErrorDetail { - /** - * The error code. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly code?: string; - /** - * The error message. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly message?: string; - /** - * The error target. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly target?: string; - /** - * The error details. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly details?: ErrorDetail[]; - /** - * The error additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly additionalInfo?: ErrorAdditionalInfo[]; -} - -/** The resource management error additional info. */ -export interface ErrorAdditionalInfo { - /** - * The additional info type. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly type?: string; - /** - * The additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly info?: Record<string, unknown>; -} - -export interface ListKnowledgeBasesResult { - knowledgeBases: KnowledgeBase[]; -} - -/** Represents a knowledge source definition. */ -export interface KnowledgeSource { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: - | "searchIndex" - | "azureBlob" - | "indexedSharePoint" - | "indexedOneLake" - | "web" - | "remoteSharePoint"; - /** The name of the knowledge source. */ - name: string; - /** Optional user-defined description.
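For illustration, a customer-managed key configuration matching the SearchResourceEncryptionKey shape completed above; every value is a placeholder:

// Minimal sketch of a CMK configuration. Omitting accessCredentials lets the
// service fall back to a managed identity for Key Vault access.
const encryptionKey: SearchResourceEncryptionKey = {
  keyName: "my-key", // placeholder
  keyVersion: "0123456789abcdef0123456789abcdef", // placeholder
  vaultUri: "https://my-keyvault-name.vault.azure.net",
};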
*/ - description?: string; - /** The ETag of the knowledge base. */ - etag?: string; - /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your knowledge base definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your knowledge base definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your knowledge base definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; -} - -export interface ListKnowledgeSourcesResult { - knowledgeSources: KnowledgeSourceUnion[]; -} - -/** Represents the status and synchronization history of a knowledge source. */ -export interface KnowledgeSourceStatus { - /** The current synchronization status of the knowledge source. */ - synchronizationStatus: KnowledgeSourceSynchronizationStatus; - /** The synchronization interval (e.g., '1d' for daily). Null if no schedule is configured. */ - synchronizationInterval?: string; - /** Current synchronization state that spans multiple indexer runs. */ - currentSynchronizationState?: SynchronizationState; - /** Details of the last completed synchronization. Null on first sync. */ - lastSynchronizationState?: CompletedSynchronizationState; - /** Statistical information about the knowledge source synchronization history. Null on first sync. */ - statistics?: KnowledgeSourceStatistics; -} - -/** Represents the current state of an ongoing synchronization that spans multiple indexer runs. */ -export interface SynchronizationState { - /** The start time of the current synchronization. */ - startTime: Date; - /** The number of item updates successfully processed in the current synchronization. */ - itemsUpdatesProcessed: number; - /** The number of item updates that failed in the current synchronization. */ - itemsUpdatesFailed: number; - /** The number of items skipped in the current synchronization. */ - itemsSkipped: number; -} - -/** Represents the completed state of the last synchronization. */ -export interface CompletedSynchronizationState { - /** The start time of the last completed synchronization. */ - startTime: Date; - /** The end time of the last completed synchronization. */ - endTime: Date; - /** The number of item updates successfully processed in the last synchronization. */ - itemsUpdatesProcessed: number; - /** The number of item updates that failed in the last synchronization. */ - itemsUpdatesFailed: number; - /** The number of items skipped in the last synchronization. */ - itemsSkipped: number; -} - -/** Statistical information about knowledge source synchronization history. */ -export interface KnowledgeSourceStatistics { - /** The total number of synchronizations completed. */ - totalSynchronization: number; - /** The average duration of synchronizations in HH:MM:SS format. */ - averageSynchronizationDuration: string; - /** The average number of items processed per synchronization. */ - averageItemsProcessedPerSynchronization: number; -} - -/** Represents a datasource definition, which can be used to configure an indexer. */ -export interface SearchIndexerDataSource { - /** The name of the datasource. 
- */ - name: string; - /** The description of the datasource. */ - description?: string; - /** The type of the datasource. */ - type: SearchIndexerDataSourceType; - /** - * A specific type of the data source, in case the resource is capable of different modalities. For example, 'MongoDb' for certain 'cosmosDb' accounts. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly subType?: string; - /** Credentials for the datasource. */ - credentials: DataSourceCredentials; - /** The data container for the datasource. */ - container: SearchIndexerDataContainer; - /** An explicit managed identity to use for this datasource. If not specified and the connection string is a managed identity, the system-assigned managed identity is used. If not specified, the value remains unchanged. If "none" is specified, the value of this property is cleared. */ - identity?: SearchIndexerDataIdentityUnion; - /** Ingestion options with various types of permission data. */ - indexerPermissionOptions?: IndexerPermissionOption[]; - /** The data change detection policy for the datasource. */ - dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion; - /** The data deletion detection policy for the datasource. */ - dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion; - /** The ETag of the data source. */ - etag?: string; - /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even Microsoft, can decrypt your data source definition. Once you have encrypted your data source definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your datasource definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; -} - -/** Represents credentials that can be used to connect to a datasource. */ -export interface DataSourceCredentials { - /** The connection string for the datasource. Set to `<unchanged>` (with brackets) if you don't want the connection string updated. Set to `<redacted>` if you want to remove the connection string value from the datasource. */ - connectionString?: string; -} - -/** Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. */ -export interface SearchIndexerDataContainer { - /** The name of the table or view (for Azure SQL data source) or collection (for CosmosDB data source) that will be indexed. */ - name: string; - /** A query that is applied to this data container. The syntax and meaning of this parameter is datasource-specific. Not supported by Azure SQL datasources. */ - query?: string; -} - -/** Base type for data change detection policies. */ -export interface DataChangeDetectionPolicy { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy" - | "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"; -} - -/** Base type for data deletion detection policies.
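To make the discriminated policy types concrete, a sketch of an Azure SQL data source definition using the high-water-mark odatatype above. The "azuresql" type value and highWaterMarkColumnName (a property of the derived policy type defined elsewhere in this file) are assumptions here, and all values are placeholders:

const dataSource: SearchIndexerDataSource = {
  name: "hotels-sql-ds", // placeholder
  type: "azuresql", // assumed SearchIndexerDataSourceType value
  credentials: { connectionString: "<connection-string>" },
  container: { name: "dbo.Hotels" }, // placeholder table
  dataChangeDetectionPolicy: {
    odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
    highWaterMarkColumnName: "RowVersion", // assumed derived-type property
  },
};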
*/ -export interface DataDeletionDetectionPolicy { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" - | "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"; -} - -/** Response from a List Datasources request. If successful, it includes the full definitions of all datasources. */ -export interface ListDataSourcesResult { - /** - * The datasources in the Search service. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly dataSources: SearchIndexerDataSource[]; -} - -export interface DocumentKeysOrIds { - /** document keys to be reset */ - documentKeys?: string[]; - /** datasource document identifiers to be reset */ - datasourceDocumentIds?: string[]; -} - -export interface IndexerResyncBody { - /** Re-sync options that have been pre-defined from data source. */ - options?: IndexerResyncOption[]; -} - -/** Represents an indexer. */ -export interface SearchIndexer { - /** The name of the indexer. */ - name: string; - /** The description of the indexer. */ - description?: string; - /** The name of the datasource from which this indexer reads data. */ - dataSourceName: string; - /** The name of the skillset executing with this indexer. */ - skillsetName?: string; - /** The name of the index to which this indexer writes data. */ - targetIndexName: string; - /** The schedule for this indexer. */ - schedule?: IndexingSchedule; - /** Parameters for indexer execution. */ - parameters?: IndexingParameters; - /** Defines mappings between fields in the data source and corresponding target fields in the index. */ - fieldMappings?: FieldMapping[]; - /** Output field mappings are applied after enrichment and immediately before indexing. */ - outputFieldMappings?: FieldMapping[]; - /** A value indicating whether the indexer is disabled. Default is false. */ - isDisabled?: boolean; - /** The ETag of the indexer. */ - etag?: string; - /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your indexer definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; - /** Adds caching to an enrichment pipeline to allow for incremental modification steps without having to rebuild the index every time. */ - cache?: SearchIndexerCache; -} - -/** Represents a schedule for indexer execution. */ -export interface IndexingSchedule { - /** The interval of time between indexer executions. */ - interval: string; - /** The time when an indexer should start running. */ - startTime?: Date; -} - -/** Represents parameters for indexer execution. */ -export interface IndexingParameters { - /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. 
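A small illustrative indexer definition tying SearchIndexer, IndexingSchedule, and IndexingParameters together; interval strings are ISO 8601 durations, and the names are placeholders:

const indexer: SearchIndexer = {
  name: "hotels-indexer", // placeholder
  dataSourceName: "hotels-sql-ds",
  targetIndexName: "hotels-index",
  schedule: { interval: "PT2H" }, // run every two hours
  parameters: { batchSize: 500, maxFailedItems: 10 },
};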
*/ - batchSize?: number; - /** The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. */ - maxFailedItems?: number; - /** The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. */ - maxFailedItemsPerBatch?: number; - /** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ - configuration?: IndexingParametersConfiguration; -} - -/** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ -export interface IndexingParametersConfiguration { - /** Describes unknown properties. The value of an unknown property can be of "any" type. */ - [property: string]: any; - /** Represents the parsing mode for indexing from an Azure blob data source. */ - parsingMode?: BlobIndexerParsingMode; - /** Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over those files during indexing. */ - excludedFileNameExtensions?: string; - /** Comma-delimited list of filename extensions to select when processing from Azure blob storage. For example, you could focus indexing on specific application files ".docx, .pptx, .msg" to specifically include those file types. */ - indexedFileNameExtensions?: string; - /** For Azure blobs, set to false if you want to continue indexing when an unsupported content type is encountered, and you don't know all the content types (file extensions) in advance. */ - failOnUnsupportedContentType?: boolean; - /** For Azure blobs, set to false if you want to continue indexing if a document fails indexing. */ - failOnUnprocessableDocument?: boolean; - /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. */ - indexStorageMetadataOnlyForOversizedDocuments?: boolean; - /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */ - delimitedTextHeaders?: string; - /** For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, "|"). */ - delimitedTextDelimiter?: string; - /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */ - firstLineContainsHeaders?: boolean; - /** Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. */ - markdownParsingSubmode?: MarkdownParsingSubmode; - /** Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. */ - markdownHeaderDepth?: MarkdownHeaderDepth; - /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */ - documentRoot?: string; - /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". 
This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */ - dataToExtract?: BlobIndexerDataToExtract; - /** Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. */ - imageAction?: BlobIndexerImageAction; - /** If true, will create a path //document//file_data that is an object representing the original file data downloaded from your blob data source. This allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. */ - allowSkillsetToReadFileData?: boolean; - /** Determines algorithm for text extraction from PDF files in Azure blob storage. */ - pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm; - /** Specifies the environment in which the indexer should execute. */ - executionEnvironment?: IndexerExecutionEnvironment; - /** Increases the timeout beyond the 5-minute default for Azure SQL database data sources, specified in the format "hh:mm:ss". */ - queryTimeout?: string; -} - -/** Defines a mapping between a field in a data source and a target field in an index. */ -export interface FieldMapping { - /** The name of the field in the data source. */ - sourceFieldName: string; - /** The name of the target field in the index. Same as the source field name by default. */ - targetFieldName?: string; - /** A function to apply to each source field value before indexing. */ - mappingFunction?: FieldMappingFunction; -} - -/** Represents a function that transforms a value from a data source before indexing. */ -export interface FieldMappingFunction { - /** The name of the field mapping function. */ - name: string; - /** A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. */ - parameters?: { [propertyName: string]: any }; -} - -export interface SearchIndexerCache { - /** A guid for the SearchIndexerCache. */ - id?: string; - /** The connection string to the storage account where the cache data will be persisted. */ - storageConnectionString?: string; - /** Specifies whether incremental reprocessing is enabled. */ - enableReprocessing?: boolean; - /** The user-assigned managed identity used for connections to the enrichment cache. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - identity?: SearchIndexerDataIdentityUnion; -} - -/** Response from a List Indexers request. If successful, it includes the full definitions of all indexers. */ -export interface ListIndexersResult { - /** - * The indexers in the Search service. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly indexers: SearchIndexer[]; -} - -/** Represents the current status and execution history of an indexer. */ -export interface SearchIndexerStatus { - /** - * The name of the indexer. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly name: string; - /** - * Overall indexer status. - * NOTE: This property will not be serialized. It can only be populated by the server. 
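As a concrete use of FieldMapping and FieldMappingFunction, the classic blob-key mapping: base64Encode is one of the service's built-in mapping functions, used here so storage paths become valid document keys:

const keyMapping: FieldMapping = {
  sourceFieldName: "metadata_storage_path",
  targetFieldName: "id",
  mappingFunction: { name: "base64Encode" },
};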
- */ - readonly status: IndexerStatus; - /** - * Snapshot of the indexer’s cumulative runtime consumption for the service over the current UTC period. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly runtime: IndexerRuntime; - /** - * The result of the most recent or an in-progress indexer execution. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly lastResult?: IndexerExecutionResult; - /** - * History of the recent indexer executions, sorted in reverse chronological order. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly executionHistory: IndexerExecutionResult[]; - /** - * The execution limits for the indexer. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly limits: SearchIndexerLimits; - /** - * All of the state that defines and dictates the indexer's current execution. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly currentState?: IndexerState; -} - -/** Represents the indexer's cumulative runtime consumption in the service. */ -export interface IndexerRuntime { - /** Cumulative runtime of the indexer from the beginningTime to endingTime, in seconds. */ - usedSeconds: number; - /** Cumulative runtime remaining for all indexers in the service from the beginningTime to endingTime, in seconds. */ - remainingSeconds?: number; - /** Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */ - beginningTime: Date; - /** End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */ - endingTime: Date; -} - -/** Represents the result of an individual indexer execution. */ -export interface IndexerExecutionResult { - /** - * The outcome of this indexer execution. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly status: IndexerExecutionStatus; - /** - * The outcome of this indexer execution. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly statusDetail?: IndexerExecutionStatusDetail; - /** - * The mode the indexer is running in. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly mode?: IndexingMode; - /** - * The error message indicating the top-level error, if any. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly errorMessage?: string; - /** - * The start time of this indexer execution. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly startTime?: Date; - /** - * The end time of this indexer execution, if the execution has already completed. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly endTime?: Date; - /** - * The item-level indexing errors. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly errors: SearchIndexerError[]; - /** - * The item-level indexing warnings. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly warnings: SearchIndexerWarning[]; - /** - * The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed. 
- * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly itemCount: number; - /** - * The number of items that failed to be indexed during this indexer execution. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly failedItemCount: number; - /** - * Change tracking state with which an indexer execution started. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly initialTrackingState?: string; - /** - * Change tracking state with which an indexer execution finished. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly finalTrackingState?: string; -} - -/** Represents an item- or document-level indexing error. */ -export interface SearchIndexerError { - /** - * The key of the item for which indexing failed. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly key?: string; - /** - * The message describing the error that occurred while processing the item. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly errorMessage: string; - /** - * The status code indicating why the indexing operation failed. Possible values include: 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly statusCode: number; - /** - * The name of the source at which the error originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly name?: string; - /** - * Additional, verbose details about the error to assist in debugging the indexer. This may not be always available. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly details?: string; - /** - * A link to a troubleshooting guide for these classes of errors. This may not be always available. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly documentationLink?: string; -} - -/** Represents an item-level warning. */ -export interface SearchIndexerWarning { - /** - * The key of the item which generated a warning. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly key?: string; - /** - * The message describing the warning that occurred while processing the item. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly message: string; - /** - * The name of the source at which the warning originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly name?: string; - /** - * Additional, verbose details about the warning to assist in debugging the indexer. This may not be always available. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly details?: string; - /** - * A link to a troubleshooting guide for these classes of warnings. This may not be always available. 
- * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly documentationLink?: string; -} - -export interface SearchIndexerLimits { - /** - * The maximum duration that the indexer is permitted to run for one execution. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly maxRunTime?: string; - /** - * The maximum size of a document, in bytes, which will be considered valid for indexing. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly maxDocumentExtractionSize?: number; - /** - * The maximum number of characters that will be extracted from a document picked up for indexing. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly maxDocumentContentCharactersToExtract?: number; -} - -/** Represents all of the state that defines and dictates the indexer's current execution. */ -export interface IndexerState { - /** - * The mode the indexer is running in. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly mode?: IndexingMode; - /** - * Change tracking state used when indexing starts on all documents in the datasource. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly allDocsInitialTrackingState?: string; - /** - * Change tracking state value when indexing finishes on all documents in the datasource. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly allDocsFinalTrackingState?: string; - /** - * Change tracking state used when indexing starts on select, reset documents in the datasource. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resetDocsInitialTrackingState?: string; - /** - * Change tracking state value when indexing finishes on select, reset documents in the datasource. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resetDocsFinalTrackingState?: string; - /** - * The list of document keys that have been reset. The document key is the document's unique identifier for the data in the search index. The indexer will prioritize selectively re-ingesting these keys. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resetDocumentKeys?: string[]; - /** - * The list of datasource document ids that have been reset. The datasource document id is the unique identifier for the data in the datasource. The indexer will prioritize selectively re-ingesting these ids. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resetDatasourceDocumentIds?: string[]; - /** - * Change tracking state used when indexing starts on selective options from the datasource. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resyncInitialTrackingState?: string; - /** - * Change tracking state value when indexing finishes on selective options from the datasource. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resyncFinalTrackingState?: string; -} - -/** A list of skills. */ -export interface SearchIndexerSkillset { - /** The name of the skillset. */ - name: string; - /** The description of the skillset. 
*/ - description?: string; - /** A list of skills in the skillset. */ - skills: SearchIndexerSkillUnion[]; - /** Details about the Azure AI service to be used when running skills. */ - cognitiveServicesAccount?: CognitiveServicesAccountUnion; - /** Definition of additional projections to Azure blob, table, or files, of enriched data. */ - knowledgeStore?: SearchIndexerKnowledgeStore; - /** Definition of additional projections to secondary search index(es). */ - indexProjection?: SearchIndexerIndexProjection; - /** The ETag of the skillset. */ - etag?: string; - /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can decrypt your skillset definition. Once you have encrypted your skillset definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your skillset definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; -} - -/** Base type for skills. */ -export interface SearchIndexerSkill { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Skills.Util.ConditionalSkill" - | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" - | "#Microsoft.Skills.Vision.OcrSkill" - | "#Microsoft.Skills.Vision.ImageAnalysisSkill" - | "#Microsoft.Skills.Text.LanguageDetectionSkill" - | "#Microsoft.Skills.Util.ShaperSkill" - | "#Microsoft.Skills.Text.MergeSkill" - | "#Microsoft.Skills.Text.EntityRecognitionSkill" - | "#Microsoft.Skills.Text.SentimentSkill" - | "#Microsoft.Skills.Text.V3.SentimentSkill" - | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" - | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" - | "#Microsoft.Skills.Text.PIIDetectionSkill" - | "#Microsoft.Skills.Text.SplitSkill" - | "#Microsoft.Skills.Text.CustomEntityLookupSkill" - | "#Microsoft.Skills.Text.TranslationSkill" - | "#Microsoft.Skills.Util.DocumentExtractionSkill" - | "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" - | "#Microsoft.Skills.Custom.WebApiSkill" - | "#Microsoft.Skills.Custom.ChatCompletionSkill" - | "#Microsoft.Skills.Util.ContentUnderstandingSkill" - | "#Microsoft.Skills.Custom.AmlSkill" - | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill" - | "#Microsoft.Skills.Vision.VectorizeSkill"; - /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */ - name?: string; - /** The description of the skill which describes the inputs, outputs, and usage of the skill. */ - description?: string; - /** Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. */ - context?: string; - /** Inputs of the skills could be a column in the source data set, or the output of an upstream skill. */ - inputs: InputFieldMappingEntry[]; - /** The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. 
 */
- outputs: OutputFieldMappingEntry[];
-}
-
-/** Input field mapping for a skill. */
-export interface InputFieldMappingEntry {
- /** The name of the input. */
- name: string;
- /** The source of the input. */
- source?: string;
- /** The source context used for selecting recursive inputs. */
- sourceContext?: string;
- /** The recursive inputs used when creating a complex type. */
- inputs?: InputFieldMappingEntry[];
-}
-
-/** Output field mapping for a skill. */
-export interface OutputFieldMappingEntry {
- /** The name of the output defined by the skill. */
- name: string;
- /** The target name of the output. It is optional and defaults to the name. */
- targetName?: string;
-}
-
-/** Base type for describing any Azure AI service resource attached to a skillset. */
-export interface CognitiveServicesAccount {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- odatatype:
- | "#Microsoft.Azure.Search.DefaultCognitiveServices"
- | "#Microsoft.Azure.Search.CognitiveServicesByKey"
- | "#Microsoft.Azure.Search.AIServicesByKey"
- | "#Microsoft.Azure.Search.AIServicesByIdentity";
- /** Description of the Azure AI service resource attached to a skillset. */
- description?: string;
-}
-
-/** Definition of additional projections to Azure blob, table, or files, of enriched data. */
-export interface SearchIndexerKnowledgeStore {
- /** The connection string to the storage account projections will be stored in. */
- storageConnectionString: string;
- /** A list of additional projections to perform during indexing. */
- projections: SearchIndexerKnowledgeStoreProjection[];
- /** The user-assigned managed identity used for connections to Azure Storage when writing knowledge store projections. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
- identity?: SearchIndexerDataIdentityUnion;
- /** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
- parameters?: SearchIndexerKnowledgeStoreParameters;
-}
-
-/** Container object for various projection selectors. */
-export interface SearchIndexerKnowledgeStoreProjection {
- /** Projections to Azure Table storage. */
- tables?: SearchIndexerKnowledgeStoreTableProjectionSelector[];
- /** Projections to Azure Blob storage. */
- objects?: SearchIndexerKnowledgeStoreObjectProjectionSelector[];
- /** Projections to Azure File storage. */
- files?: SearchIndexerKnowledgeStoreFileProjectionSelector[];
-}
-
-/** Abstract class to share properties between concrete selectors. */
-export interface SearchIndexerKnowledgeStoreProjectionSelector {
- /** Name of reference key to different projection. */
- referenceKeyName?: string;
- /** Name of generated key to store projection under. */
- generatedKeyName?: string;
- /** Source data to project. */
- source?: string;
- /** Source context for complex projections. */
- sourceContext?: string;
- /** Nested inputs for complex projections. */
- inputs?: InputFieldMappingEntry[];
-}
-
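These mapping entries are what connect a skill to the enrichment tree. A rough sketch of how they might be used together (the skillset name, paths, and choice of the split skill are illustrative; the concrete skill types are declared alongside these base types):

```ts
// Hypothetical skillset: reads /document/content, splits it into chunks, and
// re-attaches the result as /document/pages for downstream skills or projections.
const skillset: SearchIndexerSkillset = {
  name: "example-skillset",
  skills: [
    {
      odatatype: "#Microsoft.Skills.Text.SplitSkill",
      context: "/document",
      inputs: [{ name: "text", source: "/document/content" }],
      // targetName is optional; when omitted, the output name is used.
      outputs: [{ name: "textItems", targetName: "pages" }],
    },
  ],
};
```

-/** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
-export interface SearchIndexerKnowledgeStoreParameters {
- /** Describes unknown properties.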
The value of an unknown property can be of "any" type. */
- [property: string]: any;
- /** Whether or not projections should synthesize a generated key name if one isn't already present. */
- synthesizeGeneratedKeyName?: boolean;
-}
-
-/** Definition of additional projections to secondary search indexes. */
-export interface SearchIndexerIndexProjection {
- /** A list of projections to be performed to secondary search indexes. */
- selectors: SearchIndexerIndexProjectionSelector[];
- /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
- parameters?: SearchIndexerIndexProjectionParameters;
-}
-
-/** Description for what data to store in the designated search index. */
-export interface SearchIndexerIndexProjectionSelector {
- /** Name of the search index to project to. Must have a key field with the 'keyword' analyzer set. */
- targetIndexName: string;
- /** Name of the field in the search index to map the parent document's key value to. Must be a string field that is filterable and not the key field. */
- parentKeyFieldName: string;
- /** Source context for the projections. Represents the cardinality at which the document will be split into multiple sub documents. */
- sourceContext: string;
- /** Mappings for the projection, or which source should be mapped to which field in the target index. */
- mappings: InputFieldMappingEntry[];
-}
-
-/** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
-export interface SearchIndexerIndexProjectionParameters {
- /** Describes unknown properties. The value of an unknown property can be of "any" type. */
- [property: string]: any;
- /** Defines behavior of the index projections in relation to the rest of the indexer. */
- projectionMode?: IndexProjectionMode;
-}
-
-/** Response from a list skillset request. If successful, it includes the full definitions of all skillsets. */
-export interface ListSkillsetsResult {
- /**
- * The skillsets defined in the Search service.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly skillsets: SearchIndexerSkillset[];
-}
-
-export interface SkillNames {
- /** The names of skills to be reset. */
- skillNames?: string[];
-}
-
-/** Represents a synonym map definition. */
-export interface SynonymMap {
- /** The name of the synonym map. */
- name: string;
- /** The format of the synonym map. Only the 'solr' format is currently supported. */
- format: "solr";
- /** A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. */
- synonyms: string;
- /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
- encryptionKey?: SearchResourceEncryptionKey;
- /** The ETag of the synonym map.
*/ - etag?: string; -} - -/** Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. */ -export interface ListSynonymMapsResult { - /** - * The synonym maps in the Search service. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly synonymMaps: SynonymMap[]; -} - -/** Represents a search index definition, which describes the fields and search behavior of an index. */ -export interface SearchIndex { - /** The name of the index. */ - name: string; - /** The description of the index. */ - description?: string; - /** The fields of the index. */ - fields: SearchField[]; - /** The scoring profiles for the index. */ - scoringProfiles?: ScoringProfile[]; - /** The name of the scoring profile to use if none is specified in the query. If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. */ - defaultScoringProfile?: string; - /** Options to control Cross-Origin Resource Sharing (CORS) for the index. */ - corsOptions?: CorsOptions; - /** The suggesters for the index. */ - suggesters?: Suggester[]; - /** The analyzers for the index. */ - analyzers?: LexicalAnalyzerUnion[]; - /** The tokenizers for the index. */ - tokenizers?: LexicalTokenizerUnion[]; - /** The token filters for the index. */ - tokenFilters?: TokenFilterUnion[]; - /** The character filters for the index. */ - charFilters?: CharFilterUnion[]; - /** The normalizers for the index. */ - normalizers?: LexicalNormalizerUnion[]; - /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ - encryptionKey?: SearchResourceEncryptionKey; - /** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */ - similarity?: SimilarityUnion; - /** Defines parameters for a search index that influence semantic capabilities. */ - semanticSearch?: SemanticSearch; - /** Contains configuration options related to vector search. */ - vectorSearch?: VectorSearch; - /** A value indicating whether permission filtering is enabled for the index. */ - permissionFilterOption?: SearchIndexPermissionFilterOption; - /** A value indicating whether the index is leveraging Purview-specific features. This property defaults to false and cannot be changed after index creation. */ - purviewEnabled?: boolean; - /** The ETag of the index. */ - etag?: string; -} - -/** Represents a field in an index definition, which describes the name, data type, and search behavior of a field. */ -export interface SearchField { - /** The name of the field, which must be unique within the fields collection of the index or parent field. */ - name: string; - /** The data type of the field. 
*/ - type: SearchFieldDataType; - /** A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields. */ - key?: boolean; - /** A value indicating whether the field can be returned in a search result. You can disable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible to the end user. This property must be true for key fields, and it must be null for complex fields. This property can be changed on existing fields. Enabling this property does not cause any increase in index storage requirements. Default is true for simple fields, false for vector fields, and null for complex fields. */ - retrievable?: boolean; - /** An immutable value indicating whether the field will be persisted separately on disk to be returned in a search result. You can disable this option if you don't plan to return the field contents in a search response to save on storage overhead. This can only be set during index creation and only for vector fields. This property cannot be changed for existing fields or set as false for new fields. If this property is set as false, the property 'retrievable' must also be set to false. This property must be true or unset for key fields, for new fields, and for non-vector fields, and it must be null for complex fields. Disabling this property will reduce index storage requirements. The default is true for vector fields. */ - stored?: boolean; - /** A value indicating whether the field is full-text searchable. This means it will undergo analysis such as word-breaking during indexing. If you set a searchable field to a value like "sunny day", internally it will be split into the individual tokens "sunny" and "day". This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) are searchable by default. This property must be false for simple fields of other non-string data types, and it must be null for complex fields. Note: searchable fields consume extra space in your index to accommodate additional tokenized versions of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false. */ - searchable?: boolean; - /** A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields. */ - filterable?: boolean; - /** A value indicating whether to enable the field to be referenced in $orderby expressions. By default, the search engine sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field can be sortable only if it is single-valued (it has a single value in the scope of the parent document). Simple collection fields cannot be sortable, since they are multi-valued. 
Simple sub-fields of complex collections are also multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent field, or an ancestor field, that's the complex collection. Complex fields cannot be sortable and the sortable property must be null for such fields. The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. */ - sortable?: boolean; - /** A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. */ - facetable?: boolean; - /** A value indicating whether the field should be used as a permission filter. */ - permissionFilter?: PermissionFilter; - /** A value indicating whether the field should be used for sensitivity label filtering. This enables document-level filtering based on Microsoft Purview sensitivity labels. */ - sensitivityLabel?: boolean; - /** The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */ - analyzer?: LexicalAnalyzerName; - /** The name of the analyzer used at search time for the field. This option can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. */ - searchAnalyzer?: LexicalAnalyzerName; - /** The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */ - indexAnalyzer?: LexicalAnalyzerName; - /** The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed for the field. Must be null for complex fields. */ - normalizer?: LexicalNormalizerName; - /** The dimensionality of the vector field. */ - vectorSearchDimensions?: number; - /** The name of the vector search profile that specifies the algorithm and vectorizer to use when searching the vector field. */ - vectorSearchProfileName?: string; - /** The encoding format to interpret the field contents. */ - vectorEncodingFormat?: VectorEncodingFormat; - /** A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. 
This attribute can be changed on existing fields. Must be null or an empty collection for complex fields. */ - synonymMaps?: string[]; - /** A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. */ - fields?: SearchField[]; -} - -/** Defines parameters for a search index that influence scoring in search queries. */ -export interface ScoringProfile { - /** The name of the scoring profile. */ - name: string; - /** Parameters that boost scoring based on text matches in certain index fields. */ - textWeights?: TextWeights; - /** The collection of functions that influence the scoring of documents. */ - functions?: ScoringFunctionUnion[]; - /** A value indicating how the results of individual scoring functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. */ - functionAggregation?: ScoringFunctionAggregation; -} - -/** Defines weights on index fields for which matches should boost scoring in search queries. */ -export interface TextWeights { - /** The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. */ - weights: { [propertyName: string]: number }; -} - -/** Base type for functions that can modify document scores during ranking. */ -export interface ScoringFunction { - /** Polymorphic discriminator, which specifies the different types this object can be */ - type: "distance" | "freshness" | "magnitude" | "tag"; - /** The name of the field used as input to the scoring function. */ - fieldName: string; - /** A multiplier for the raw score. Must be a positive number not equal to 1.0. */ - boost: number; - /** A value indicating how boosting will be interpolated across document scores; defaults to "Linear". */ - interpolation?: ScoringFunctionInterpolation; -} - -/** Defines options to control Cross-Origin Resource Sharing (CORS) for an index. */ -export interface CorsOptions { - /** The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). */ - allowedOrigins: string[]; - /** The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. */ - maxAgeInSeconds?: number; -} - -/** Defines how the Suggest API should apply to a group of fields in the index. */ -export interface Suggester { - /** The name of the suggester. */ - name: string; - /** A value indicating the capabilities of the suggester. */ - searchMode: "analyzingInfixMatching"; - /** The list of field names to which the suggester applies. Each field must be searchable. */ - sourceFields: string[]; -} - -/** Base type for analyzers. */ -export interface LexicalAnalyzer { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Azure.Search.CustomAnalyzer" - | "#Microsoft.Azure.Search.PatternAnalyzer" - | "#Microsoft.Azure.Search.StandardAnalyzer" - | "#Microsoft.Azure.Search.StopAnalyzer"; - /** The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */ - name: string; -} - -/** Base type for tokenizers. 
*/ -export interface LexicalTokenizer { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Azure.Search.ClassicTokenizer" - | "#Microsoft.Azure.Search.EdgeNGramTokenizer" - | "#Microsoft.Azure.Search.KeywordTokenizer" - | "#Microsoft.Azure.Search.KeywordTokenizerV2" - | "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer" - | "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer" - | "#Microsoft.Azure.Search.NGramTokenizer" - | "#Microsoft.Azure.Search.PathHierarchyTokenizerV2" - | "#Microsoft.Azure.Search.PatternTokenizer" - | "#Microsoft.Azure.Search.StandardTokenizer" - | "#Microsoft.Azure.Search.StandardTokenizerV2" - | "#Microsoft.Azure.Search.UaxUrlEmailTokenizer"; - /** The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */ - name: string; -} - -/** Base type for token filters. */ -export interface TokenFilter { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Azure.Search.AsciiFoldingTokenFilter" - | "#Microsoft.Azure.Search.CjkBigramTokenFilter" - | "#Microsoft.Azure.Search.CommonGramTokenFilter" - | "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter" - | "#Microsoft.Azure.Search.EdgeNGramTokenFilter" - | "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2" - | "#Microsoft.Azure.Search.ElisionTokenFilter" - | "#Microsoft.Azure.Search.KeepTokenFilter" - | "#Microsoft.Azure.Search.KeywordMarkerTokenFilter" - | "#Microsoft.Azure.Search.LengthTokenFilter" - | "#Microsoft.Azure.Search.LimitTokenFilter" - | "#Microsoft.Azure.Search.NGramTokenFilter" - | "#Microsoft.Azure.Search.NGramTokenFilterV2" - | "#Microsoft.Azure.Search.PatternCaptureTokenFilter" - | "#Microsoft.Azure.Search.PatternReplaceTokenFilter" - | "#Microsoft.Azure.Search.PhoneticTokenFilter" - | "#Microsoft.Azure.Search.ShingleTokenFilter" - | "#Microsoft.Azure.Search.SnowballTokenFilter" - | "#Microsoft.Azure.Search.StemmerTokenFilter" - | "#Microsoft.Azure.Search.StemmerOverrideTokenFilter" - | "#Microsoft.Azure.Search.StopwordsTokenFilter" - | "#Microsoft.Azure.Search.SynonymTokenFilter" - | "#Microsoft.Azure.Search.TruncateTokenFilter" - | "#Microsoft.Azure.Search.UniqueTokenFilter" - | "#Microsoft.Azure.Search.WordDelimiterTokenFilter"; - /** The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */ - name: string; -} - -/** Base type for character filters. */ -export interface CharFilter { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Azure.Search.MappingCharFilter" - | "#Microsoft.Azure.Search.PatternReplaceCharFilter"; - /** The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */ - name: string; -} - -/** Base type for normalizers. */ -export interface LexicalNormalizer { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; - /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. 
It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */
- name: string;
-}
-
-/** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */
-export interface Similarity {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- odatatype:
- | "#Microsoft.Azure.Search.ClassicSimilarity"
- | "#Microsoft.Azure.Search.BM25Similarity";
-}
-
-/** Defines parameters for a search index that influence semantic capabilities. */
-export interface SemanticSearch {
- /** Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time. */
- defaultConfigurationName?: string;
- /** The semantic configurations for the index. */
- configurations?: SemanticConfiguration[];
-}
-
-/** Defines a specific configuration to be used in the context of semantic capabilities. */
-export interface SemanticConfiguration {
- /** The name of the semantic configuration. */
- name: string;
- /** Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) needs to be set. */
- prioritizedFields: SemanticPrioritizedFields;
- /** Specifies the score type to be used for the sort order of the search results. */
- rankingOrder?: RankingOrder;
- /** Determines which semantic or query rewrite models to use during model flighting/upgrades. */
- flightingOptIn?: boolean;
-}
-
-/** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
-export interface SemanticPrioritizedFields {
- /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
- titleField?: SemanticField;
- /** Defines the content fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain text in natural language form. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
- contentFields?: SemanticField[];
- /** Defines the keyword fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain a list of keywords. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
- keywordsFields?: SemanticField[];
-}
-
-/** A field that is used as part of the semantic configuration. */
-export interface SemanticField {
- name: string;
-}
-
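For illustration (the index and field names are invented), a semantic configuration might be assembled like this:

```ts
// Hypothetical semantic configuration for a hotel index; at least one of the
// prioritized-fields buckets must be populated.
const semanticSearch: SemanticSearch = {
  defaultConfigurationName: "hotels-semantic-config",
  configurations: [
    {
      name: "hotels-semantic-config",
      prioritizedFields: {
        titleField: { name: "hotelName" },
        contentFields: [{ name: "description" }],
        keywordsFields: [{ name: "tags" }],
      },
    },
  ],
};
```

-/** Contains configuration options related to vector search. */
-export interface VectorSearch {
- /** Defines combinations of configurations to use with vector search. */
- profiles?: VectorSearchProfile[];
- /** Contains configuration options specific to the algorithm used during indexing or querying. */
- algorithms?: VectorSearchAlgorithmConfigurationUnion[];
- /** Contains configuration options on how to vectorize text vector queries.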
 */
- vectorizers?: VectorSearchVectorizerUnion[];
- /** Contains configuration options specific to the compression method used during indexing or querying. */
- compressions?: VectorSearchCompressionUnion[];
-}
-
-/** Defines a combination of configurations to use with vector search. */
-export interface VectorSearchProfile {
- /** The name to associate with this particular vector search profile. */
- name: string;
- /** The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. */
- algorithmConfigurationName: string;
- /** The name of the vectorization being configured for use with vector search. */
- vectorizerName?: string;
- /** The name of the compression method configuration that specifies the compression method and optional parameters. */
- compressionName?: string;
-}
-
-/** Contains configuration options specific to the algorithm used during indexing or querying. */
-export interface VectorSearchAlgorithmConfiguration {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "hnsw" | "exhaustiveKnn";
- /** The name to associate with this particular configuration. */
- name: string;
-}
-
-/** Specifies the vectorization method to be used during query time. */
-export interface VectorSearchVectorizer {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "azureOpenAI" | "customWebApi" | "aiServicesVision" | "aml";
- /** The name to associate with this particular vectorization method. */
- vectorizerName: string;
-}
-
-/** Contains configuration options specific to the compression method used during indexing or querying. */
-export interface VectorSearchCompression {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "scalarQuantization" | "binaryQuantization";
- /** The name to associate with this particular configuration. */
- compressionName: string;
- /** Contains the options for rescoring. */
- rescoringOptions?: RescoringOptions;
- /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should only be used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */
- truncationDimension?: number;
-}
-
-/** Contains the options for rescoring. */
-export interface RescoringOptions {
- /** If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency. */
- enableRescoring?: boolean;
- /** Default oversampling factor. Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values improve recall at the expense of latency. */
- defaultOversampling?: number;
- /** Controls the storage method for original vectors. This setting is immutable. */
- rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod;
-}
-
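A sketch of how these pieces connect (all configuration names invented): a profile refers to its algorithm, vectorizer, and compression configurations by name, and vector fields in turn reference the profile.

```ts
// Hypothetical vector search setup: an HNSW algorithm plus scalar quantization,
// tied together by a named profile that vector fields can reference.
const vectorSearch: VectorSearch = {
  algorithms: [{ kind: "hnsw", name: "hnsw-default" }],
  compressions: [{ kind: "scalarQuantization", compressionName: "sq-default" }],
  profiles: [
    {
      name: "vector-profile",
      algorithmConfigurationName: "hnsw-default",
      compressionName: "sq-default",
    },
  ],
};
```

-/** Response from a List Indexes request.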
If successful, it includes the full definitions of all indexes. */ -export interface ListIndexesResult { - /** - * The indexes in the Search service. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly indexes: SearchIndex[]; -} - -/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */ -export interface GetIndexStatisticsResult { - /** - * The number of documents in the index. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly documentCount: number; - /** - * The amount of storage in bytes consumed by the index. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly storageSize: number; - /** - * The amount of memory in bytes consumed by vectors in the index. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly vectorIndexSize: number; -} - -/** Specifies some text and analysis components used to break that text into tokens. */ -export interface AnalyzeRequest { - /** The text to break into tokens. */ - text: string; - /** The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownAnalyzerNames is an enum containing known values. */ - analyzer?: string; - /** The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownTokenizerNames is an enum containing known values. */ - tokenizer?: string; - /** The name of the normalizer to use to normalize the given text. */ - normalizer?: LexicalNormalizerName; - /** An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ - tokenFilters?: string[]; - /** An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ - charFilters?: string[]; -} - -/** The result of testing an analyzer on text. */ -export interface AnalyzeResult { - /** The list of tokens returned by the analyzer specified in the request. */ - tokens: AnalyzedTokenInfo[]; -} - -/** Information about a token returned by an analyzer. */ -export interface AnalyzedTokenInfo { - /** - * The token returned by the analyzer. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly token: string; - /** - * The index of the first character of the token in the input text. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly startOffset: number; - /** - * The index of the last character of the token in the input text. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly endOffset: number; - /** - * The position of the token in the input text relative to other tokens. The first token in the input text has position 0, the next has position 1, and so on. Depending on the analyzer used, some tokens might have the same position, for example if they are synonyms of each other. - * NOTE: This property will not be serialized. It can only be populated by the server. 
- */ - readonly position: number; -} - -/** Represents an index alias, which describes a mapping from the alias name to an index. The alias name can be used in place of the index name for supported operations. */ -export interface SearchAlias { - /** The name of the alias. */ - name: string; - /** The name of the index this alias maps to. Only one index name may be specified. */ - indexes: string[]; - /** The ETag of the alias. */ - etag?: string; -} - -/** Response from a List Aliases request. If successful, it includes the associated index mappings for all aliases. */ -export interface ListAliasesResult { - /** - * The aliases in the Search service. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly aliases: SearchAlias[]; -} - -/** Response from a get service statistics request. If successful, it includes service level counters, indexer runtime information, and limits. */ -export interface ServiceStatistics { - /** Service level resource counters. */ - counters: ServiceCounters; - /** Service level indexers runtime information. */ - indexersRuntime: ServiceIndexersRuntime; - /** Service level general limits. */ - limits: ServiceLimits; -} - -/** Represents service-level resource counters and quotas. */ -export interface ServiceCounters { - /** Total number of aliases. */ - aliasCounter: ResourceCounter; - /** Total number of documents across all indexes in the service. */ - documentCounter: ResourceCounter; - /** Total number of indexes. */ - indexCounter: ResourceCounter; - /** Total number of indexers. */ - indexerCounter: ResourceCounter; - /** Total number of data sources. */ - dataSourceCounter: ResourceCounter; - /** Total size of used storage in bytes. */ - storageSizeCounter: ResourceCounter; - /** Total number of synonym maps. */ - synonymMapCounter: ResourceCounter; - /** Total number of skillsets. */ - skillsetCounter: ResourceCounter; - /** Total memory consumption of all vector indexes within the service, in bytes. */ - vectorIndexSizeCounter: ResourceCounter; -} - -/** Represents a resource's usage and quota. */ -export interface ResourceCounter { - /** The resource usage amount. */ - usage: number; - /** The resource amount quota. */ - quota?: number; -} - -/** Represents service level indexers runtime information. */ -export interface ServiceIndexersRuntime { - /** Cumulative runtime of all indexers in the service from the beginningTime to endingTime, in seconds. */ - usedSeconds: number; - /** Cumulative runtime remaining for all indexers in the service from the beginningTime to endingTime, in seconds. */ - remainingSeconds?: number; - /** Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */ - beginningTime: Date; - /** End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */ - endingTime: Date; -} - -/** Represents various service level limits. */ -export interface ServiceLimits { - /** The maximum allowed fields per index. */ - maxFieldsPerIndex?: number; - /** The maximum depth which you can nest sub-fields in an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. */ - maxFieldNestingDepthPerIndex?: number; - /** The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. */ - maxComplexCollectionFieldsPerIndex?: number; - /** The maximum number of objects in complex collections allowed per document. 
*/ - maxComplexObjectsInCollectionsPerDocument?: number; - /** The maximum amount of storage in bytes allowed per index. */ - maxStoragePerIndexInBytes?: number; - /** The maximum cumulative runtime in seconds allowed for all indexers in the service over the current UTC period. */ - maxCumulativeIndexerRuntimeSeconds?: number; -} - -/** Response from a request to retrieve stats summary of all indexes. If successful, it includes the stats of each index in the service. */ -export interface ListIndexStatsSummary { - /** - * The Statistics summary of all indexes in the Search service. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly indexesStatistics: IndexStatisticsSummary[]; -} - -/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */ -export interface IndexStatisticsSummary { - /** The name of the index. */ - name: string; - /** - * The number of documents in the index. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly documentCount: number; - /** - * The amount of storage in bytes consumed by the index. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly storageSize: number; - /** - * The amount of memory in bytes consumed by vectors in the index. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly vectorIndexSize: number; -} - -/** Specifies the parameters for connecting to the Azure OpenAI resource. */ -export interface AzureOpenAIParameters { - /** The resource URI of the Azure OpenAI resource. */ - resourceUrl?: string; - /** ID of the Azure OpenAI model deployment on the designated resource. */ - deploymentId?: string; - /** API key of the designated Azure OpenAI resource. */ - apiKey?: string; - /** The user-assigned managed identity used for outbound connections. */ - authIdentity?: SearchIndexerDataIdentityUnion; - /** The name of the embedding model that is deployed at the provided deploymentId path. */ - modelName?: AzureOpenAIModelName; -} - -/** Parameters for search index knowledge source. */ -export interface SearchIndexKnowledgeSourceParameters { - /** The name of the Search index. */ - searchIndexName: string; - /** Used to request additional fields for referenced source data. */ - sourceDataFields?: SearchIndexFieldReference[]; - /** Used to restrict which fields to search on the search index. */ - searchFields?: SearchIndexFieldReference[]; - /** Used to specify a different semantic configuration on the target search index other than the default one. */ - semanticConfigurationName?: string; -} - -export interface SearchIndexFieldReference { - name: string; -} - -/** Parameters for Azure Blob Storage knowledge source. */ -export interface AzureBlobKnowledgeSourceParameters { - /** Key-based connection string or the ResourceId format if using a managed identity. */ - connectionString: string; - /** The name of the blob storage container. */ - containerName: string; - /** Optional folder path within the container. */ - folderPath?: string; - /** Set to true if connecting to an ADLS Gen2 storage account. Default is false. */ - isAdlsGen2?: boolean; - /** Consolidates all general ingestion settings. */ - ingestionParameters?: KnowledgeSourceIngestionParameters; - /** - * Resources created by the knowledge source. - * NOTE: This property will not be serialized. It can only be populated by the server. 
- */
- readonly createdResources?: { [propertyName: string]: string };
-}
-
-/** Consolidates all general ingestion settings for knowledge sources. */
-export interface KnowledgeSourceIngestionParameters {
- /** An explicit identity to use for this knowledge source. */
- identity?: SearchIndexerDataIdentityUnion;
- /** Optional vectorizer configuration for vectorizing content. */
- embeddingModel?: KnowledgeSourceVectorizerUnion;
- /** Optional chat completion model for image verbalization or context extraction. */
- chatCompletionModel?: KnowledgeBaseModelUnion;
- /** Indicates whether image verbalization should be disabled. Default is false. */
- disableImageVerbalization?: boolean;
- /** Optional schedule for data ingestion. */
- ingestionSchedule?: IndexingSchedule;
- /** Optional list of permission types to ingest together with document content. If specified, it will set the indexer permission options for the data source. */
- ingestionPermissionOptions?: KnowledgeSourceIngestionPermissionOption[];
- /** Optional content extraction mode. Default is 'minimal'. */
- contentExtractionMode?: KnowledgeSourceContentExtractionMode;
- /** Optional AI Services configuration for content processing. */
- aiServices?: AIServices;
-}
-
-/** Specifies the vectorization method to be used for knowledge source embedding model, with optional name. */
-export interface KnowledgeSourceVectorizer {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "azureOpenAI";
-}
-
-/** AI Services configuration for content processing. */
-export interface AIServices {
- /** The URI of the AI Services endpoint. */
- uri: string;
- /** The API key for accessing AI Services. */
- apiKey?: string;
-}
-
-/** Parameters for SharePoint knowledge source. */
-export interface IndexedSharePointKnowledgeSourceParameters {
- /** SharePoint connection string with format: SharePointOnlineEndpoint=[SharePoint site url];ApplicationId=[Azure AD App ID];ApplicationSecret=[Azure AD App client secret];TenantId=[SharePoint site tenant id] */
- connectionString: string;
- /** Specifies which SharePoint libraries to access. */
- containerName: IndexedSharePointContainerName;
- /** Optional query to filter SharePoint content. */
- query?: string;
- /** Consolidates all general ingestion settings. */
- ingestionParameters?: KnowledgeSourceIngestionParameters;
- /**
- * Resources created by the knowledge source.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly createdResources?: { [propertyName: string]: string };
-}
-
-/** Parameters for OneLake knowledge source. */
-export interface IndexedOneLakeKnowledgeSourceParameters {
- /** OneLake workspace ID. */
- fabricWorkspaceId: string;
- /** Specifies which OneLake lakehouse to access. */
- lakehouseId: string;
- /** Optional OneLakehouse folder or shortcut to filter OneLake content. */
- targetPath?: string;
- /** Consolidates all general ingestion settings. */
- ingestionParameters?: KnowledgeSourceIngestionParameters;
- /**
- * Resources created by the knowledge source.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly createdResources?: { [propertyName: string]: string };
-}
-
-/** Parameters for web knowledge source. */
-export interface WebKnowledgeSourceParameters {
- /** Domain allow/block configuration for web results. */
- domains?: WebKnowledgeSourceDomains;
-}
-
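As an illustration (the connection string and container name are placeholders, and 'minimal' is simply the documented default extraction mode):

```ts
// Hypothetical ingestion settings for an Azure Blob knowledge source.
const blobParameters: AzureBlobKnowledgeSourceParameters = {
  connectionString: "<connection string or ResourceId>",
  containerName: "docs",
  ingestionParameters: {
    disableImageVerbalization: true,
    contentExtractionMode: "minimal",
  },
};
```

-/** Domain allow/block configuration for web knowledge source.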
*/ -export interface WebKnowledgeSourceDomains { - /** Domains that are allowed for web results */ - allowedDomains?: WebKnowledgeSourceDomain[]; - /** Domains that are blocked from web results */ - blockedDomains?: WebKnowledgeSourceDomain[]; -} - -/** Configuration for web knowledge source domain. */ -export interface WebKnowledgeSourceDomain { - /** The address of the domain. */ - address: string; - /** Whether or not to include subpages from this domain. */ - includeSubpages?: boolean; -} - -/** Parameters for remote SharePoint knowledge source. */ -export interface RemoteSharePointKnowledgeSourceParameters { - /** Keyword Query Language (KQL) expression with queryable SharePoint properties and attributes to scope the retrieval before the query runs. See documentation: https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference */ - filterExpression?: string; - /** A list of metadata fields to be returned for each item in the response. Only retrievable metadata properties can be included in this list. By default, no metadata is returned. Optional. */ - resourceMetadata?: string[]; - /** Container ID for SharePoint Embedded connection. When this is null, it will use SharePoint Online. */ - containerTypeId?: string; -} - -/** Contains the parameters specific to the HNSW algorithm. */ -export interface HnswParameters { - /** The number of bi-directional links created for every new element during construction. Increasing this parameter value may improve recall and reduce retrieval times for datasets with high intrinsic dimensionality at the expense of increased memory consumption and longer indexing time. */ - m?: number; - /** The size of the dynamic list containing the nearest neighbors, which is used during index time. Increasing this parameter may improve index quality, at the expense of increased indexing time. At a certain point, increasing this parameter leads to diminishing returns. */ - efConstruction?: number; - /** The size of the dynamic list containing the nearest neighbors, which is used during search time. Increasing this parameter may improve search results, at the expense of slower search. At a certain point, increasing this parameter leads to diminishing returns. */ - efSearch?: number; - /** The similarity metric to use for vector comparisons. */ - metric?: VectorSearchAlgorithmMetric; -} - -/** Contains the parameters specific to exhaustive KNN algorithm. */ -export interface ExhaustiveKnnParameters { - /** The similarity metric to use for vector comparisons. */ - metric?: VectorSearchAlgorithmMetric; -} - -/** Contains the parameters specific to Scalar Quantization. */ -export interface ScalarQuantizationParameters { - /** The quantized data type of compressed vector values. */ - quantizedDataType?: VectorSearchCompressionTarget; -} - -/** Specifies the properties for connecting to a user-defined vectorizer. */ -export interface WebApiParameters { - /** The URI of the Web API providing the vectorizer. */ - uri?: string; - /** The headers required to make the HTTP request. */ - httpHeaders?: { [propertyName: string]: string }; - /** The method for the HTTP request. */ - httpMethod?: string; - /** The desired timeout for the request. Default is 30 seconds. */ - timeout?: string; - /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. 
This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */ - authResourceId?: string; - /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - authIdentity?: SearchIndexerDataIdentityUnion; -} - -/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */ -export interface AIServicesVisionParameters { - /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */ - modelVersion: string | null; - /** The resource URI of the AI Services resource. */ - resourceUri: string; - /** API key of the designated AI Services resource. */ - apiKey?: string; - /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - authIdentity?: SearchIndexerDataIdentityUnion; -} - -/** Specifies the properties for connecting to an AML vectorizer. */ -export interface AMLParameters { - /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */ - scoringUri: string | null; - /** (Required for key authentication) The key for the AML service. */ - authenticationKey?: string; - /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */ - resourceId?: string; - /** (Optional) When specified, indicates the timeout for the http client making the API call. */ - timeout?: string; - /** (Optional for token authentication). The region the AML service is deployed in. */ - region?: string; - /** The name of the embedding model from the Azure AI Foundry Catalog that is deployed at the provided endpoint. */ - modelName?: AIFoundryModelCatalogName; -} - -/** Provides parameter values to a distance scoring function. */ -export interface DistanceScoringParameters { - /** The name of the parameter passed in search queries to specify the reference location. */ - referencePointParameter: string; - /** The distance in kilometers from the reference location where the boosting range ends. */ - boostingDistance: number; -} - -/** Provides parameter values to a freshness scoring function. */ -export interface FreshnessScoringParameters { - /** The expiration period after which boosting will stop for a particular document. */ - boostingDuration: string; -} - -/** Provides parameter values to a magnitude scoring function. */ -export interface MagnitudeScoringParameters { - /** The field value at which boosting starts. 
 */
- boostingRangeStart: number;
- /** The field value at which boosting ends. */
- boostingRangeEnd: number;
- /** A value indicating whether to apply a constant boost for field values beyond the range end value; default is false. */
- shouldBoostBeyondRangeByConstant?: boolean;
-}
-
-/** Provides parameter values to a tag scoring function. */
-export interface TagScoringParameters {
- /** The name of the parameter passed in search queries to specify the list of tags to compare against the target field. */
- tagsParameter: string;
-}
-
-/** An object that contains information about the matches that were found, and related metadata. */
-export interface CustomEntity {
- /** The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the "normalized" form of the text being found. */
- name: string;
- /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
- description?: string;
- /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
- type?: string;
- /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
- subtype?: string;
- /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
- id?: string;
- /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case insensitive matches of "Microsoft" could be: microsoft, microSoft, MICROSOFT. */
- caseSensitive?: boolean;
- /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to accent. */
- accentSensitive?: boolean;
- /** Defaults to 0. Maximum value of 5. Denotes the acceptable number of divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. For instance, if the edit distance is set to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do. */
- fuzzyEditDistance?: number;
- /** Changes the default case sensitivity value for this entity. It can be used to change the default value of all aliases caseSensitive values. */
- defaultCaseSensitive?: boolean;
- /** Changes the default accent sensitivity value for this entity. It can be used to change the default value of all aliases accentSensitive values. */
- defaultAccentSensitive?: boolean;
- /** Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases fuzzyEditDistance values. */
- defaultFuzzyEditDistance?: number;
- /** An array of complex objects that can be used to specify alternative spellings or synonyms to the root entity name. */
- aliases?: CustomEntityAlias[];
-}
-
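A small sketch of a lookup entry (entity and alias invented) showing how the sensitivity and fuzziness knobs combine:

```ts
// Hypothetical custom entity: matches "Microsoft" case-insensitively, tolerates
// one divergent character, and also matches the alias "MSFT" exactly as written.
const entity: CustomEntity = {
  name: "Microsoft",
  caseSensitive: false,
  fuzzyEditDistance: 1,
  aliases: [{ text: "MSFT", caseSensitive: true }],
};
```

-/** A complex object that can be used to specify alternative spellings or synonyms to the root entity name. */
-export interface CustomEntityAlias {
- /** The text of the alias. */
- text: string;
- /** Determine if the alias is case sensitive.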
 */
- caseSensitive?: boolean;
- /** Determine if the alias is accent sensitive. */
- accentSensitive?: boolean;
- /** Determine the fuzzy edit distance of the alias. */
- fuzzyEditDistance?: number;
-}
-
-export interface AzureOpenAITokenizerParameters {
- /** Only applies if the unit is set to azureOpenAITokens. Options include 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. */
- encoderModelName?: SplitSkillEncoderModelName;
- /** (Optional) Only applies if the unit is set to azureOpenAITokens. This parameter defines a collection of special tokens that are permitted within the tokenization process. */
- allowedSpecialTokens?: string[];
-}
-
-/** Controls the cardinality for chunking the content. */
-export interface DocumentIntelligenceLayoutSkillChunkingProperties {
- /** The unit of the chunk. */
- unit?: DocumentIntelligenceLayoutSkillChunkingUnit;
- /** The maximum chunk length in characters. Default is 500. */
- maximumLength?: number;
- /** The length of overlap provided between two text chunks. Default is 0. */
- overlapLength?: number;
-}
-
-/** Common language model parameters for Chat Completions. If omitted, default values are used. */
-export interface CommonModelParameters {
- /** The name of the model to use (for example, 'gpt-4o'). Default is null if not specified. */
- model?: string;
- /** A float in the range [-2,2] that reduces or increases likelihood of repeated tokens. Default is 0. */
- frequencyPenalty?: number;
- /** A float in the range [-2,2] that penalizes new tokens based on their existing presence. Default is 0. */
- presencePenalty?: number;
- /** Maximum number of tokens to generate. */
- maxTokens?: number;
- /** Sampling temperature. Default is 0.7. */
- temperature?: number;
- /** Random seed for controlling deterministic outputs. If omitted, randomization is used. */
- seed?: number;
- /** List of stop sequences that will cut off text generation. Default is none. */
- stop?: string[];
-}
-
-/** Determines how the language model's response should be serialized. Defaults to 'text'. */
-export interface ChatCompletionResponseFormat {
- /** Specifies how the LLM should format the response. Possible values: 'text' (plain string), 'json_object' (arbitrary JSON), or 'json_schema' (adheres to provided schema). */
- type?: ChatCompletionResponseFormatType;
- /** An open dictionary for extended properties. Required if 'type' == 'json_schema'. */
- chatCompletionSchemaProperties?: ChatCompletionResponseFormatJsonSchemaProperties;
-}
-
-/** An open dictionary for extended properties. Required if 'type' == 'json_schema'. */
-export interface ChatCompletionResponseFormatJsonSchemaProperties {
- /** Name of the json schema the model will adhere to. */
- name?: string;
- /** Description of the json schema the model will adhere to. */
- description?: string;
- /** Whether or not the model's response should use structured outputs. Default is true. */
- strict?: boolean;
- /** Object defining the custom schema the model will use to structure its output. */
- schema?: ChatCompletionSchema;
-}
-
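For example (the schema name and properties are invented), requesting schema-bound JSON might look like this; note that the schema's properties value is a JSON-formatted string rather than a nested object:

```ts
// Hypothetical response format that constrains the model to a JSON schema.
const responseFormat: ChatCompletionResponseFormat = {
  type: "json_schema",
  chatCompletionSchemaProperties: {
    name: "extracted_people",
    strict: true,
    schema: {
      type: "object",
      properties: '{"people": {"type": "array", "items": {"type": "string"}}}',
      required: ["people"],
      additionalProperties: false,
    },
  },
};
```

-/** Object defining the custom schema the model will use to structure its output. */
-export interface ChatCompletionSchema {
- /** Type of schema representation. Usually 'object'. Default is 'object'. */
- type?: string;
- /** A JSON-formatted string that defines the output schema's properties and constraints for the model. */
- properties?: string;
- /** An array of the property names that are required to be part of the model's response.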
-
-/** Determines how the language model's response should be serialized. Defaults to 'text'. */
-export interface ChatCompletionResponseFormat {
-  /** Specifies how the LLM should format the response. Possible values: 'text' (plain string), 'json_object' (arbitrary JSON), or 'json_schema' (adheres to provided schema). */
-  type?: ChatCompletionResponseFormatType;
-  /** An open dictionary for extended properties. Required if 'type' == 'json_schema'. */
-  chatCompletionSchemaProperties?: ChatCompletionResponseFormatJsonSchemaProperties;
-}
-
-/** An open dictionary for extended properties. Required if 'type' == 'json_schema'. */
-export interface ChatCompletionResponseFormatJsonSchemaProperties {
-  /** Name of the JSON schema the model will adhere to. */
-  name?: string;
-  /** Description of the JSON schema the model will adhere to. */
-  description?: string;
-  /** Whether or not the model's response should use structured outputs. Default is true. */
-  strict?: boolean;
-  /** Object defining the custom schema the model will use to structure its output. */
-  schema?: ChatCompletionSchema;
-}
-
-/** Object defining the custom schema the model will use to structure its output. */
-export interface ChatCompletionSchema {
-  /** Type of schema representation. Usually 'object'. Default is 'object'. */
-  type?: string;
-  /** A JSON-formatted string that defines the output schema's properties and constraints for the model. */
-  properties?: string;
-  /** An array of the property names that are required to be part of the model's response. All properties must be included for structured outputs. */
-  required?: string[];
-  /** Controls whether it is allowable for an object to contain additional keys / values that were not defined in the JSON Schema. Default is false. */
-  additionalProperties?: boolean;
-}
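// Illustrative sketch: requesting schema-constrained JSON output. Note that
// ChatCompletionSchema.properties is a JSON-formatted *string*, not an object.
// Assumes the types above are in scope; the schema itself is invented.
const responseFormat: ChatCompletionResponseFormat = {
  type: "json_schema",
  chatCompletionSchemaProperties: {
    name: "invoice_fields",
    strict: true,
    schema: {
      type: "object",
      properties: JSON.stringify({
        vendor: { type: "string" },
        total: { type: "number" },
      }),
      required: ["vendor", "total"], // structured outputs require every property
      additionalProperties: false,
    },
  },
};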
-
-/** Controls the cardinality for chunking the content. */
-export interface ContentUnderstandingSkillChunkingProperties {
-  /** The unit of the chunk. */
-  unit?: ContentUnderstandingSkillChunkingUnit;
-  /** The maximum chunk length in characters. Default is 500. */
-  maximumLength?: number;
-  /** The length of overlap provided between two text chunks. Default is 0. */
-  overlapLength?: number;
-}
-
-/** Specifies the Azure OpenAI resource used to do query planning. */
-export interface KnowledgeBaseAzureOpenAIModel extends KnowledgeBaseModel {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "azureOpenAI";
-  /** Contains the parameters specific to Azure OpenAI model endpoint. */
-  azureOpenAIParameters: AzureOpenAIParameters;
-}
-
-/** Run knowledge retrieval with minimal reasoning effort. */
-export interface KnowledgeRetrievalMinimalReasoningEffort
-  extends KnowledgeRetrievalReasoningEffort {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "minimal";
-}
-
-/** Run knowledge retrieval with low reasoning effort. */
-export interface KnowledgeRetrievalLowReasoningEffort
-  extends KnowledgeRetrievalReasoningEffort {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "low";
-}
-
-/** Run knowledge retrieval with medium reasoning effort. */
-export interface KnowledgeRetrievalMediumReasoningEffort
-  extends KnowledgeRetrievalReasoningEffort {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "medium";
-}
-
-/** Clears the identity property of a datasource. */
-export interface SearchIndexerDataNoneIdentity
-  extends SearchIndexerDataIdentity {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.DataNoneIdentity";
-}
-
-/** Specifies the identity for a datasource to use. */
-export interface SearchIndexerDataUserAssignedIdentity
-  extends SearchIndexerDataIdentity {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity";
-  /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. */
-  resourceId: string;
-}
-
-/** Knowledge Source targeting a search index. */
-export interface SearchIndexKnowledgeSource extends KnowledgeSource {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "searchIndex";
-  /** The parameters for the knowledge source. */
-  searchIndexParameters: SearchIndexKnowledgeSourceParameters;
-}
-
-/** Configuration for Azure Blob Storage knowledge source. */
-export interface AzureBlobKnowledgeSource extends KnowledgeSource {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "azureBlob";
-  /** The parameters for the Azure Blob knowledge source. */
-  azureBlobParameters: AzureBlobKnowledgeSourceParameters;
-}
-
-/** Configuration for SharePoint knowledge source. */
-export interface IndexedSharePointKnowledgeSource extends KnowledgeSource {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "indexedSharePoint";
-  /** The parameters for the SharePoint knowledge source. */
-  indexedSharePointParameters: IndexedSharePointKnowledgeSourceParameters;
-}
-
-/** Configuration for OneLake knowledge source. */
-export interface IndexedOneLakeKnowledgeSource extends KnowledgeSource {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "indexedOneLake";
-  /** The parameters for the OneLake knowledge source. */
-  indexedOneLakeParameters: IndexedOneLakeKnowledgeSourceParameters;
-}
-
-/** Knowledge Source targeting web results. */
-export interface WebKnowledgeSource extends KnowledgeSource {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "web";
-  /** The parameters for the web knowledge source. */
-  webParameters?: WebKnowledgeSourceParameters;
-}
-
-/** Configuration for remote SharePoint knowledge source. */
-export interface RemoteSharePointKnowledgeSource extends KnowledgeSource {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "remoteSharePoint";
-  /** The parameters for the knowledge source. */
-  remoteSharePointParameters?: RemoteSharePointKnowledgeSourceParameters;
-}
-
-/** Defines a data change detection policy that captures changes based on the value of a high water mark column. */
-export interface HighWaterMarkChangeDetectionPolicy
-  extends DataChangeDetectionPolicy {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy";
-  /** The name of the high water mark column. */
-  highWaterMarkColumnName: string;
-}
-
-/** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */
-export interface SqlIntegratedChangeTrackingPolicy
-  extends DataChangeDetectionPolicy {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy";
-}
-
-/** Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. */
-export interface SoftDeleteColumnDeletionDetectionPolicy
-  extends DataDeletionDetectionPolicy {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
-  /** The name of the column to use for soft-deletion detection. */
-  softDeleteColumnName?: string;
-  /** The marker value that identifies an item as deleted. */
-  softDeleteMarkerValue?: string;
-}
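// Illustrative sketch: pairing a high-water-mark change detection policy with a
// soft-delete deletion detection policy on a data source. Assumes the types
// above are in scope; the column names are invented.
const changeDetection: HighWaterMarkChangeDetectionPolicy = {
  odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
  highWaterMarkColumnName: "lastModified",
};
const deletionDetection: SoftDeleteColumnDeletionDetectionPolicy = {
  odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
  softDeleteColumnName: "isDeleted",
  softDeleteMarkerValue: "true",
};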
-
-/** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */
-export interface NativeBlobSoftDeleteDeletionDetectionPolicy
-  extends DataDeletionDetectionPolicy {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
-}
-
-/** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */
-export interface ConditionalSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Util.ConditionalSkill";
-}
-
-/** A skill that uses text analytics for key phrase extraction. */
-export interface KeyPhraseExtractionSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill";
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: KeyPhraseExtractionSkillLanguage;
-  /** A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. */
-  maxKeyPhraseCount?: number;
-  /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
-  modelVersion?: string;
-}
-
-/** A skill that extracts text from image files. */
-export interface OcrSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Vision.OcrSkill";
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: OcrSkillLanguage;
-  /** A value indicating whether to turn on orientation detection. Default is false. */
-  shouldDetectOrientation?: boolean;
-  /** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". */
-  lineEnding?: OcrLineEnding;
-}
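// Illustrative sketch: an OCR skill reading normalized images. The context,
// inputs, and outputs fields come from the SearchIndexerSkill base type declared
// elsewhere in this file; the paths follow the usual enrichment-tree convention.
const ocr: OcrSkill = {
  odatatype: "#Microsoft.Skills.Vision.OcrSkill",
  context: "/document/normalized_images/*",
  defaultLanguageCode: "en",
  shouldDetectOrientation: true, // default is false
  lineEnding: "space",
  inputs: [{ name: "image", source: "/document/normalized_images/*" }],
  outputs: [{ name: "text", targetName: "text" }],
};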
-
-/** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */
-export interface ImageAnalysisSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill";
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: ImageAnalysisSkillLanguage;
-  /** A list of visual features. */
-  visualFeatures?: VisualFeature[];
-  /** A string indicating which domain-specific details to return. */
-  details?: ImageDetail[];
-}
-
-/** A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. */
-export interface LanguageDetectionSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill";
-  /** A country code to use as a hint to the language detection model if it cannot disambiguate the language. */
-  defaultCountryHint?: string;
-  /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
-  modelVersion?: string;
-}
-
-/** A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). */
-export interface ShaperSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Util.ShaperSkill";
-}
-
-/** A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. */
-export interface MergeSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.MergeSkill";
-  /** The tag that indicates the start of the merged text. By default, the tag is an empty space. */
-  insertPreTag?: string;
-  /** The tag that indicates the end of the merged text. By default, the tag is an empty space. */
-  insertPostTag?: string;
-}
-
-/**
- * This skill is deprecated. Use the V3.EntityRecognitionSkill instead.
- *
- * @deprecated
- */
-export interface EntityRecognitionSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill";
-  /** A list of entity categories that should be extracted. */
-  categories?: EntityCategory[];
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: EntityRecognitionSkillLanguage;
-  /** Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not be surfaced. */
-  includeTypelessEntities?: boolean;
-  /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
-  minimumPrecision?: number;
-}
-
-/**
- * This skill is deprecated. Use the V3.SentimentSkill instead.
- *
- * @deprecated
- */
-export interface SentimentSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.SentimentSkill";
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: SentimentSkillLanguage;
-}
-
-/** Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels (such as "negative", "neutral" and "positive") based on the highest confidence score found by the service at the sentence and document level. */
-export interface SentimentSkillV3 extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill";
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: string;
-  /** If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. */
-  includeOpinionMining?: boolean;
-  /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
-  modelVersion?: string;
-}
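// Illustrative sketch: sentence-level sentiment with opinion mining enabled.
// Base fields (inputs/outputs) come from SearchIndexerSkill; paths are invented.
const sentiment: SentimentSkillV3 = {
  odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill",
  defaultLanguageCode: "en",
  includeOpinionMining: true, // default is false
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "sentiment", targetName: "sentiment" }],
};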
-
-/** Using the Text Analytics API, extracts linked entities from text. */
-export interface EntityLinkingSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.V3.EntityLinkingSkill";
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: string;
-  /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
-  minimumPrecision?: number;
-  /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
-  modelVersion?: string;
-}
-
-/** Using the Text Analytics API, extracts entities of different types from text. */
-export interface EntityRecognitionSkillV3 extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.V3.EntityRecognitionSkill";
-  /** A list of entity categories that should be extracted. */
-  categories?: string[];
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: string;
-  /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
-  minimumPrecision?: number;
-  /** The version of the model to use when calling the Text Analytics API. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
-  modelVersion?: string;
-}
-
-/** Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it. */
-export interface PIIDetectionSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill";
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: string;
-  /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
-  minimumPrecision?: number;
-  /** A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. */
-  maskingMode?: PIIDetectionSkillMaskingMode;
-  /** The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'. */
-  maskingCharacter?: string;
-  /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
-  modelVersion?: string;
-  /** A list of PII entity categories that should be extracted and masked. */
-  categories?: string[];
-  /** If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. */
-  domain?: string;
-}
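// Illustrative sketch: masking detected PII with '*' while keeping only
// higher-confidence entities. Assumes the PIIDetectionSkill type above; the
// paths and threshold are invented.
const pii: PIIDetectionSkill = {
  odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill",
  defaultLanguageCode: "en",
  minimumPrecision: 0.5, // only keep entities scored above 0.5
  maskingMode: "replace",
  maskingCharacter: "*",
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "maskedText", targetName: "maskedContent" }],
};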
-
-/** A skill to split a string into chunks of text. */
-export interface SplitSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.SplitSkill";
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: SplitSkillLanguage;
-  /** A value indicating which split mode to perform. */
-  textSplitMode?: TextSplitMode;
-  /** The desired maximum page length. Default is 10000. */
-  maxPageLength?: number;
-  /** Only applicable when textSplitMode is set to 'pages'. If specified, the n+1th chunk will start with this number of characters/tokens from the end of the nth chunk. */
-  pageOverlapLength?: number;
-  /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */
-  maximumPagesToTake?: number;
-  /** Only applies if textSplitMode is set to 'pages'. There are two possible values, which determine how length (maximumPageLength and pageOverlapLength) is measured. The default is 'characters', which means length is measured in characters. */
-  unit?: SplitSkillUnit;
-  /** Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. */
-  azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters;
-}
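// Illustrative sketch: token-based chunking with the split skill. The shapes
// follow the declarations above; "azureOpenAITokens" is the unit value named in
// the docs, and the special-token string is invented.
const split: SplitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  textSplitMode: "pages",
  unit: "azureOpenAITokens", // measure maxPageLength/pageOverlapLength in tokens
  maxPageLength: 512,
  pageOverlapLength: 64,
  azureOpenAITokenizerParameters: { allowedSpecialTokens: ["<|endofprompt|>"] },
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "pages" }],
};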
-
-/** A skill that looks for text from a custom, user-defined list of words and phrases. */
-export interface CustomEntityLookupSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill";
-  /** A value indicating which language code to use. Default is `en`. */
-  defaultLanguageCode?: CustomEntityLookupSkillLanguage;
-  /** Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. */
-  entitiesDefinitionUri?: string;
-  /** The inline CustomEntity definition. */
-  inlineEntitiesDefinition?: CustomEntity[];
-  /** A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, this value will be the default value. */
-  globalDefaultCaseSensitive?: boolean;
-  /** A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. */
-  globalDefaultAccentSensitive?: boolean;
-  /** A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. */
-  globalDefaultFuzzyEditDistance?: number;
-}
-
-/** A skill to translate text from one language to another. */
-export interface TextTranslationSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.TranslationSkill";
-  /** The language code to translate documents into for documents that don't specify the to language explicitly. */
-  defaultToLanguageCode: TextTranslationSkillLanguage;
-  /** The language code to translate documents from for documents that don't specify the from language explicitly. */
-  defaultFromLanguageCode?: TextTranslationSkillLanguage;
-  /** The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is `en`. */
-  suggestedFrom?: TextTranslationSkillLanguage;
-}
-
-/** A skill that extracts content from a file within the enrichment pipeline. */
-export interface DocumentExtractionSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Util.DocumentExtractionSkill";
-  /** The parsingMode for the skill. Will be set to 'default' if not defined. */
-  parsingMode?: string;
-  /** The type of data to be extracted for the skill. Will be set to 'contentAndMetadata' if not defined. */
-  dataToExtract?: string;
-  /** A dictionary of configurations for the skill. */
-  configuration?: { [propertyName: string]: any };
-}
-
-/** A skill that extracts content and layout information, via Azure AI Services, from files within the enrichment pipeline. */
-export interface DocumentIntelligenceLayoutSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill";
-  /** Controls the format of the output. Default is 'markdown'. */
-  outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat;
-  /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */
-  outputMode?: DocumentIntelligenceLayoutSkillOutputMode;
-  /** The depth of headers in the markdown output. Default is h6. */
-  markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth;
-  /** Controls which content is extracted from the document by the skill. */
-  extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[];
-  /** Controls the cardinality for chunking the content. */
-  chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties;
-}
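// Illustrative sketch: layout extraction to markdown with character-based
// chunking. Assumes the skill and chunking property types above; the chunking
// unit value and input/output paths are assumptions, not taken from this diff.
const layout: DocumentIntelligenceLayoutSkill = {
  odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill",
  outputFormat: "markdown",
  outputMode: "oneToMany",
  markdownHeaderDepth: "h3",
  chunkingProperties: { unit: "characters", maximumLength: 2000, overlapLength: 200 },
  inputs: [{ name: "file_data", source: "/document/file_data" }],
  outputs: [{ name: "markdown_document", targetName: "markdownDocument" }],
};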
-
-/** A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. */
-export interface WebApiSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype:
-    | "#Microsoft.Skills.Custom.WebApiSkill"
-    | "#Microsoft.Skills.Custom.ChatCompletionSkill";
-  /** The url for the Web API. */
-  uri: string;
-  /** The headers required to make the http request. */
-  httpHeaders?: { [propertyName: string]: string };
-  /** The method for the http request. */
-  httpMethod?: string;
-  /** The desired timeout for the request. Default is 30 seconds. */
-  timeout?: string;
-  /** The desired batch size which indicates number of documents. */
-  batchSize?: number;
-  /** If set, the number of parallel calls that can be made to the Web API. */
-  degreeOfParallelism?: number;
-  /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the custom skill connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */
-  authResourceId?: string;
-  /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
-  authIdentity?: SearchIndexerDataIdentityUnion;
-}
-
-/** A skill that leverages Azure AI Content Understanding to process and extract structured insights from documents, enabling enriched, searchable content for enhanced document indexing and retrieval. */
-export interface ContentUnderstandingSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Util.ContentUnderstandingSkill";
-  /** Controls which content is extracted from the document by the skill. */
-  extractionOptions?: ContentUnderstandingSkillExtractionOptions[];
-  /** Controls the cardinality for chunking the content. */
-  chunkingProperties?: ContentUnderstandingSkillChunkingProperties;
-}
-
-/** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */
-export interface AzureMachineLearningSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Custom.AmlSkill";
-  /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
-  scoringUri?: string;
-  /** (Required for key authentication) The key for the AML service. */
-  authenticationKey?: string;
-  /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */
-  resourceId?: string;
-  /** (Optional) When specified, indicates the timeout for the http client making the API call. */
-  timeout?: string;
-  /** (Optional for token authentication). The region the AML service is deployed in. */
-  region?: string;
-  /** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */
-  degreeOfParallelism?: number;
-}
-
-/** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */
-export interface AzureOpenAIEmbeddingSkill
-  extends SearchIndexerSkill,
-    AzureOpenAIParameters {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill";
-  /** The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. */
-  dimensions?: number;
-}
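// Illustrative sketch: an embedding skill. The Azure OpenAI connection fields
// (resourceUrl, deploymentId, modelName) come from AzureOpenAIParameters, which
// is declared elsewhere in this file and assumed here; endpoint and names are invented.
const embed: AzureOpenAIEmbeddingSkill = {
  odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill",
  resourceUrl: "https://my-openai.openai.azure.com",
  deploymentId: "text-embedding-3-large",
  modelName: "text-embedding-3-large",
  dimensions: 1024, // supported by text-embedding-3 and later models
  inputs: [{ name: "text", source: "/document/pages/*" }],
  outputs: [{ name: "embedding", targetName: "vector" }],
};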
-
-/** Allows you to generate a vector embedding for a given image or text input using the Azure AI Services Vision Vectorize API. */
-export interface VisionVectorizeSkill extends SearchIndexerSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Vision.VectorizeSkill";
-  /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */
-  modelVersion: string | null;
-}
-
-/** An empty object that represents the default Azure AI service resource for a skillset. */
-export interface DefaultCognitiveServicesAccount
-  extends CognitiveServicesAccount {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices";
-}
-
-/** The multi-region account key of an Azure AI service resource that's attached to a skillset. */
-export interface CognitiveServicesAccountKey extends CognitiveServicesAccount {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey";
-  /** The key used to provision the Azure AI service resource attached to a skillset. */
-  key: string;
-}
-
-/** The account key of an Azure AI service resource that's attached to a skillset, to be used with the resource's subdomain. */
-export interface AIServicesAccountKey extends CognitiveServicesAccount {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.AIServicesByKey";
-  /** The key used to provision the Azure AI service resource attached to a skillset. */
-  key: string;
-  /** The subdomain url for the corresponding AI Service. */
-  subdomainUrl: string;
-}
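// Illustrative sketch: attaching a keyed Azure AI services resource to a
// skillset via its subdomain. Assumes the AIServicesAccountKey type above;
// the key and URL are placeholders.
const aiServices: AIServicesAccountKey = {
  odatatype: "#Microsoft.Azure.Search.AIServicesByKey",
  key: "<ai-services-key>",
  subdomainUrl: "https://my-ai-services.cognitiveservices.azure.com",
};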
-
-/** The multi-region account of an Azure AI service resource that's attached to a skillset. */
-export interface AIServicesAccountIdentity extends CognitiveServicesAccount {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.AIServicesByIdentity";
-  /** The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
-  identity?: SearchIndexerDataIdentityUnion;
-  /** The subdomain url for the corresponding AI Service. */
-  subdomainUrl: string;
-}
-
-/** Description for what data to store in Azure Tables. */
-export interface SearchIndexerKnowledgeStoreTableProjectionSelector
-  extends SearchIndexerKnowledgeStoreProjectionSelector {
-  /** Name of the Azure table to store projected data in. */
-  tableName: string;
-}
-
-/** Abstract class to share properties between concrete selectors. */
-export interface SearchIndexerKnowledgeStoreBlobProjectionSelector
-  extends SearchIndexerKnowledgeStoreProjectionSelector {
-  /** Blob container to store projections in. */
-  storageContainer: string;
-}
-
-/** Defines a function that boosts scores based on distance from a geographic location. */
-export interface DistanceScoringFunction extends ScoringFunction {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  type: "distance";
-  /** Parameter values for the distance scoring function. */
-  parameters: DistanceScoringParameters;
-}
-
-/** Defines a function that boosts scores based on the value of a date-time field. */
-export interface FreshnessScoringFunction extends ScoringFunction {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  type: "freshness";
-  /** Parameter values for the freshness scoring function. */
-  parameters: FreshnessScoringParameters;
-}
-
-/** Defines a function that boosts scores based on the magnitude of a numeric field. */
-export interface MagnitudeScoringFunction extends ScoringFunction {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  type: "magnitude";
-  /** Parameter values for the magnitude scoring function. */
-  parameters: MagnitudeScoringParameters;
-}
-
-/** Defines a function that boosts scores of documents with string values matching a given list of tags. */
-export interface TagScoringFunction extends ScoringFunction {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  type: "tag";
-  /** Parameter values for the tag scoring function. */
-  parameters: TagScoringParameters;
-}
-
-/** Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. */
-export interface CustomAnalyzer extends LexicalAnalyzer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer";
-  /** The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. KnownTokenizerNames is an enum containing known values. */
-  tokenizerName: string;
-  /** A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
-  tokenFilters?: string[];
-  /** A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
-  charFilters?: string[];
-}
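// Illustrative sketch: a custom analyzer that strips HTML, tokenizes with the
// standard tokenizer, then lowercases and ASCII-folds tokens. The filter and
// tokenizer names are the well-known built-in values; the analyzer name is invented.
const analyzer: CustomAnalyzer = {
  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
  name: "folded_text",
  tokenizerName: "standard_v2",
  tokenFilters: ["lowercase", "asciifolding"], // run in listed order
  charFilters: ["html_strip"],
};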
-
-/** Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. */
-export interface PatternAnalyzer extends LexicalAnalyzer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.PatternAnalyzer";
-  /** A value indicating whether terms should be lower-cased. Default is true. */
-  lowerCaseTerms?: boolean;
-  /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */
-  pattern?: string;
-  /** Regular expression flags. */
-  flags?: string;
-  /** A list of stopwords. */
-  stopwords?: string[];
-}
-
-/** Standard Apache Lucene analyzer; composed of the standard tokenizer, lowercase filter and stop filter. */
-export interface LuceneStandardAnalyzer extends LexicalAnalyzer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.StandardAnalyzer";
-  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
-  maxTokenLength?: number;
-  /** A list of stopwords. */
-  stopwords?: string[];
-}
-
-/** Divides text at non-letters; applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. */
-export interface StopAnalyzer extends LexicalAnalyzer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.StopAnalyzer";
-  /** A list of stopwords. */
-  stopwords?: string[];
-}
-
-/** Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. */
-export interface ClassicTokenizer extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.ClassicTokenizer";
-  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
-  maxTokenLength?: number;
-}
-
-/** Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */
-export interface EdgeNGramTokenizer extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer";
-  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
-  minGram?: number;
-  /** The maximum n-gram length. Default is 2. Maximum is 300. */
-  maxGram?: number;
-  /** Character classes to keep in the tokens. */
-  tokenChars?: TokenCharacterKind[];
-}
-
-/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */
-export interface KeywordTokenizer extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.KeywordTokenizer";
-  /** The read buffer size in bytes. Default is 256. */
-  bufferSize?: number;
-}
-
-/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */
-export interface KeywordTokenizerV2 extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.KeywordTokenizerV2";
-  /** The maximum token length. Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
-  maxTokenLength?: number;
-}
-
-/** Divides text using language-specific rules. */
-export interface MicrosoftLanguageTokenizer extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer";
-  /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
-  maxTokenLength?: number;
-  /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */
-  isSearchTokenizer?: boolean;
-  /** The language to use. The default is English. */
-  language?: MicrosoftTokenizerLanguage;
-}
-
-/** Divides text using language-specific rules and reduces words to their base forms. */
-export interface MicrosoftLanguageStemmingTokenizer extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer";
-  /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
-  maxTokenLength?: number;
-  /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */
-  isSearchTokenizer?: boolean;
-  /** The language to use. The default is English. */
-  language?: MicrosoftStemmingTokenizerLanguage;
-}
-
-/** Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */
-export interface NGramTokenizer extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.NGramTokenizer";
-  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
-  minGram?: number;
-  /** The maximum n-gram length. Default is 2. Maximum is 300. */
-  maxGram?: number;
-  /** Character classes to keep in the tokens. */
-  tokenChars?: TokenCharacterKind[];
-}
-
-/** Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. */
-export interface PathHierarchyTokenizerV2 extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2";
-  /** The delimiter character to use. Default is "/". */
-  delimiter?: string;
-  /** A value that, if set, replaces the delimiter character. Default is "/". */
-  replacement?: string;
-  /** The maximum token length. Default and maximum is 300. */
-  maxTokenLength?: number;
-  /** A value indicating whether to generate tokens in reverse order. Default is false. */
-  reverseTokenOrder?: boolean;
-  /** The number of initial tokens to skip. Default is 0. */
-  numberOfTokensToSkip?: number;
-}
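// Illustrative sketch: tokenizing file paths so that "/a/b/c" yields "/a",
// "/a/b", and "/a/b/c". Assumes the PathHierarchyTokenizerV2 type above; the
// tokenizer name is invented.
const pathTokenizer: PathHierarchyTokenizerV2 = {
  odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2",
  name: "path_hierarchy",
  delimiter: "/",
  reverseTokenOrder: false, // set true to emit suffixes instead of prefixes
};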
-
-/** Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. */
-export interface PatternTokenizer extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.PatternTokenizer";
-  /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */
-  pattern?: string;
-  /** Regular expression flags. */
-  flags?: string;
-  /** The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. */
-  group?: number;
-}
-
-/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */
-export interface LuceneStandardTokenizer extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.StandardTokenizer";
-  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. */
-  maxTokenLength?: number;
-}
-
-/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */
-export interface LuceneStandardTokenizerV2 extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.StandardTokenizerV2";
-  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
-  maxTokenLength?: number;
-}
-
-/** Tokenizes URLs and emails as one token. This tokenizer is implemented using Apache Lucene. */
-export interface UaxUrlEmailTokenizer extends LexicalTokenizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer";
-  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
-  maxTokenLength?: number;
-}
-
-/** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. */
-export interface AsciiFoldingTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter";
-  /** A value indicating whether the original token will be kept. Default is false. */
-  preserveOriginal?: boolean;
-}
-
-/** Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. */
-export interface CjkBigramTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.CjkBigramTokenFilter";
-  /** The scripts to ignore. */
-  ignoreScripts?: CjkBigramTokenFilterScripts[];
-  /** A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. */
-  outputUnigrams?: boolean;
-}
-
-/** Constructs bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. */
-export interface CommonGramTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter";
-  /** The set of common words. */
-  commonWords: string[];
-  /** A value indicating whether common words matching will be case insensitive. Default is false. */
-  ignoreCase?: boolean;
-  /** A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false. */
-  useQueryMode?: boolean;
-}
-
-/** Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. */
-export interface DictionaryDecompounderTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter";
-  /** The list of words to match against. */
-  wordList: string[];
-  /** The minimum word size. Only words longer than this get processed. Default is 5. Maximum is 300. */
-  minWordSize?: number;
-  /** The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum is 300. */
-  minSubwordSize?: number;
-  /** The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. */
-  maxSubwordSize?: number;
-  /** A value indicating whether to add only the longest matching subword to the output. Default is false. */
-  onlyLongestMatch?: boolean;
-}
-
-/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */
-export interface EdgeNGramTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilter";
-  /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */
-  minGram?: number;
-  /** The maximum n-gram length. Default is 2. */
-  maxGram?: number;
-  /** Specifies which side of the input the n-gram should be generated from. Default is "front". */
-  side?: EdgeNGramTokenFilterSide;
-}
-
-/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */
-export interface EdgeNGramTokenFilterV2 extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2";
-  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
-  minGram?: number;
-  /** The maximum n-gram length. Default is 2. Maximum is 300. */
-  maxGram?: number;
-  /** Specifies which side of the input the n-gram should be generated from. Default is "front". */
-  side?: EdgeNGramTokenFilterSide;
-}
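// Illustrative sketch: an edge n-gram filter for prefix ("search-as-you-type")
// matching. Assumes the EdgeNGramTokenFilterV2 type above; the filter name is invented.
const autocomplete: EdgeNGramTokenFilterV2 = {
  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2",
  name: "autocomplete_edge",
  minGram: 2,
  maxGram: 20, // both bounded by the 300 maximum noted above
  side: "front",
};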
-
-/** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene. */
-export interface ElisionTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter";
-  /** The set of articles to remove. */
-  articles?: string[];
-}
-
-/** A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. */
-export interface KeepTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.KeepTokenFilter";
-  /** The list of words to keep. */
-  keepWords: string[];
-  /** A value indicating whether to lower case all words first. Default is false. */
-  lowerCaseKeepWords?: boolean;
-}
-
-/** Marks terms as keywords. This token filter is implemented using Apache Lucene. */
-export interface KeywordMarkerTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter";
-  /** A list of words to mark as keywords. */
-  keywords: string[];
-  /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */
-  ignoreCase?: boolean;
-}
-
-/** Removes words that are too long or too short. This token filter is implemented using Apache Lucene. */
-export interface LengthTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.LengthTokenFilter";
-  /** The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max. */
-  minLength?: number;
-  /** The maximum length in characters. Default and maximum is 300. */
-  maxLength?: number;
-}
-
-/** Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. */
-export interface LimitTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.LimitTokenFilter";
-  /** The maximum number of tokens to produce. Default is 1. */
-  maxTokenCount?: number;
-  /** A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false. */
-  consumeAllTokens?: boolean;
-}
-
-/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */
-export interface NGramTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.NGramTokenFilter";
-  /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */
-  minGram?: number;
-  /** The maximum n-gram length. Default is 2. */
-  maxGram?: number;
-}
-
-/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */
-export interface NGramTokenFilterV2 extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.NGramTokenFilterV2";
-  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
-  minGram?: number;
-  /** The maximum n-gram length. Default is 2. Maximum is 300. */
-  maxGram?: number;
-}
-
-/** Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. */
-export interface PatternCaptureTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter";
-  /** A list of patterns to match against each token. */
-  patterns: string[];
-  /** A value indicating whether to return the original token even if one of the patterns matches. Default is true. */
-  preserveOriginal?: boolean;
-}
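// Illustrative sketch: capturing the local part of an e-mail address as its own
// token while keeping the original. Assumes the PatternCaptureTokenFilter type
// above; the name and pattern are invented.
const emailLocalPart: PatternCaptureTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter",
  name: "email_local_part",
  patterns: ["^([^@]+)@"], // one token per capture group match
  preserveOriginal: true,
};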
-
-/** A token filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene. */
-export interface PatternReplaceTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter";
-  /** A regular expression pattern. */
-  pattern: string;
-  /** The replacement text. */
-  replacement: string;
-}
-
-/** Creates tokens for phonetic matches. This token filter is implemented using Apache Lucene. */
-export interface PhoneticTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter";
-  /** The phonetic encoder to use. Default is "metaphone". */
-  encoder?: PhoneticEncoder;
-  /** A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. */
-  replaceOriginalTokens?: boolean;
-}
-
-/** Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. */
-export interface ShingleTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter";
-  /** The maximum shingle size. Default and minimum value is 2. */
-  maxShingleSize?: number;
-  /** The minimum shingle size. Default and minimum value is 2. Must be less than the value of maxShingleSize. */
-  minShingleSize?: number;
-  /** A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. Default is true. */
-  outputUnigrams?: boolean;
-  /** A value indicating whether to output unigrams for those times when no shingles are available. This property takes precedence when outputUnigrams is set to false. Default is false. */
-  outputUnigramsIfNoShingles?: boolean;
-  /** The string to use when joining adjacent tokens to form a shingle. Default is a single space (" "). */
-  tokenSeparator?: string;
-  /** The string to insert for each position at which there is no token. Default is an underscore ("_"). */
-  filterToken?: string;
-}
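// Illustrative sketch: emitting word bigrams alongside unigrams for phrase-like
// matching. Assumes the ShingleTokenFilter type above; the filter name is invented.
const bigrams: ShingleTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter",
  name: "bigrams",
  minShingleSize: 2,
  maxShingleSize: 2,
  outputUnigrams: true, // keep single terms in the stream as well
};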
-
-/** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */
-export interface SnowballTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.SnowballTokenFilter";
-  /** The language to use. */
-  language: SnowballTokenFilterLanguage;
-}
-
-/** Language specific stemming filter. This token filter is implemented using Apache Lucene. */
-export interface StemmerTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.StemmerTokenFilter";
-  /** The language to use. */
-  language: StemmerTokenFilterLanguage;
-}
-
-/** Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. */
-export interface StemmerOverrideTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter";
-  /** A list of stemming rules in the following format: "word => stem", for example: "ran => run". */
-  rules: string[];
-}
-
-/** Removes stop words from a token stream. This token filter is implemented using Apache Lucene. */
-export interface StopwordsTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter";
-  /** The list of stopwords. This property and the stopwords list property cannot both be set. */
-  stopwords?: string[];
-  /** A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. */
-  stopwordsList?: StopwordsList;
-  /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */
-  ignoreCase?: boolean;
-  /** A value indicating whether to ignore the last search term if it's a stop word. Default is true. */
-  removeTrailingStopWords?: boolean;
-}
-
-/** Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene. */
-export interface SynonymTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter";
-  /** A list of synonyms, specified in one of two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma separated list of equivalent words. Set the expand option to change how this list is interpreted. */
-  synonyms: string[];
-  /** A value indicating whether to case-fold input for matching. Default is false. */
-  ignoreCase?: boolean;
-  /** A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. If true, all words in the list of synonyms (if => notation is not used) will map to one another. The following list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. */
-  expand?: boolean;
-}
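// Illustrative sketch: both synonym formats described above, an explicit "=>"
// mapping plus a comma-separated equivalence list expanded in both directions.
// Assumes the SynonymTokenFilter type above; the name and terms are invented.
const synonyms: SynonymTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter",
  name: "brand_synonyms",
  synonyms: [
    "incredible, unbelievable, fabulous => amazing",
    "usa, united states, united states of america",
  ],
  ignoreCase: true,
  expand: true, // map every term in a comma-separated list to every other
};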
-/** Filters out tokens with the same text as the previous token. This token filter is implemented using Apache Lucene. */
-export interface UniqueTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.UniqueTokenFilter";
-  /** A value indicating whether to remove duplicates only at the same position. Default is false. */
-  onlyOnSamePosition?: boolean;
-}
-
-/** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */
-export interface WordDelimiterTokenFilter extends TokenFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter";
-  /** A value indicating whether to generate part words. If set, causes parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. */
-  generateWordParts?: boolean;
-  /** A value indicating whether to generate number subwords. Default is true. */
-  generateNumberParts?: boolean;
-  /** A value indicating whether maximum runs of word parts will be catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false. */
-  catenateWords?: boolean;
-  /** A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. */
-  catenateNumbers?: boolean;
-  /** A value indicating whether all subword parts will be catenated. For example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. */
-  catenateAll?: boolean;
-  /** A value indicating whether to split words on caseChange. For example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. */
-  splitOnCaseChange?: boolean;
-  /** A value indicating whether original words will be preserved and added to the subword list. Default is false. */
-  preserveOriginal?: boolean;
-  /** A value indicating whether to split on numbers. For example, if this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. */
-  splitOnNumerics?: boolean;
-  /** A value indicating whether to remove trailing "'s" for each subword. Default is true. */
-  stemEnglishPossessive?: boolean;
-  /** A list of tokens to protect from being delimited. */
-  protectedWords?: string[];
-}
-
-/** A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. */
-export interface MappingCharFilter extends CharFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.MappingCharFilter";
-  /** A list of mappings of the following format: "a=>b" (all occurrences of the character "a" will be replaced with character "b"). */
-  mappings: string[];
-}
-
-/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene. */
-export interface PatternReplaceCharFilter extends CharFilter {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter";
-  /** A regular expression pattern. */
-  pattern: string;
-  /** The replacement text. */
-  replacement: string;
-}
-
-/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of one or more filters, which modify the token that is stored. */
-export interface CustomNormalizer extends LexicalNormalizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
-  /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
-  tokenFilters?: TokenFilterName[];
-  /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
-  charFilters?: CharFilterName[];
-}
-
-/** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. */
-export interface ClassicSimilarity extends Similarity {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.ClassicSimilarity";
-}
-
-/** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). */
-export interface BM25Similarity extends Similarity {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Azure.Search.BM25Similarity";
-  /** This property controls the scaling function between the term frequency of each matching term and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. */
-  k1?: number;
-  /** This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. */
-  b?: number;
-}
-
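// --- Illustrative aside (not part of the diff): tuning BM25 relevance with
// --- the two parameters documented above; the values shown are the service
// --- defaults. An index definition would carry this object as `similarity`.
import type { BM25Similarity } from "@azure/search-documents";

const similarity: BM25Similarity = {
  odatatype: "#Microsoft.Azure.Search.BM25Similarity",
  k1: 1.2, // term-frequency saturation; 0.0 disables scaling with frequency
  b: 0.75, // document-length normalization; 0.0 disables it entirely
};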
-/** Contains configuration options specific to the HNSW approximate nearest neighbors algorithm used during indexing and querying. The HNSW algorithm offers a tunable trade-off between search speed and accuracy. */
-export interface HnswAlgorithmConfiguration
-  extends VectorSearchAlgorithmConfiguration {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "hnsw";
-  /** Contains the parameters specific to HNSW algorithm. */
-  parameters?: HnswParameters;
-}
-
-/** Contains configuration options specific to the exhaustive KNN algorithm used during querying, which will perform brute-force search across the entire vector index. */
-export interface ExhaustiveKnnAlgorithmConfiguration
-  extends VectorSearchAlgorithmConfiguration {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "exhaustiveKnn";
-  /** Contains the parameters specific to exhaustive KNN algorithm. */
-  parameters?: ExhaustiveKnnParameters;
-}
-
-/** Specifies the Azure OpenAI resource used to vectorize a query string. */
-export interface AzureOpenAIVectorizer extends VectorSearchVectorizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "azureOpenAI";
-  /** Contains the parameters specific to Azure OpenAI embedding vectorization. */
-  parameters?: AzureOpenAIParameters;
-}
-
-/** Specifies a user-defined vectorizer for generating the vector embedding of a query string. Integration of an external vectorizer is achieved using the custom Web API interface of a skillset. */
-export interface WebApiVectorizer extends VectorSearchVectorizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "customWebApi";
-  /** Specifies the properties of the user-defined vectorizer. */
-  parameters?: WebApiParameters;
-}
-
-/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */
-export interface AIServicesVisionVectorizer extends VectorSearchVectorizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "aiServicesVision";
-  /** Contains the parameters specific to AI Services Vision embedding vectorization. */
-  aIServicesVisionParameters?: AIServicesVisionParameters;
-}
-
-/** Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog for generating the vector embedding of a query string. */
-export interface AMLVectorizer extends VectorSearchVectorizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "aml";
-  /** Specifies the properties of the AML vectorizer. */
-  aMLParameters?: AMLParameters;
-}
-
-/** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */
-export interface ScalarQuantizationCompression extends VectorSearchCompression {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "scalarQuantization";
-  /** Contains the parameters specific to Scalar Quantization. */
-  parameters?: ScalarQuantizationParameters;
-}
-
-/** Contains configuration options specific to the binary quantization compression method used during indexing and querying. */
-export interface BinaryQuantizationCompression extends VectorSearchCompression {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "binaryQuantization";
-}
-
-/** Specifies the Azure OpenAI resource used to vectorize a query string. */
-export interface KnowledgeSourceAzureOpenAIVectorizer
-  extends KnowledgeSourceVectorizer {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  kind: "azureOpenAI";
-  /** Contains the parameters specific to Azure OpenAI embedding vectorization. */
-  azureOpenAIParameters?: AzureOpenAIParameters;
-}
-
-/** A skill that calls a language model via Azure AI Foundry's Chat Completions endpoint. */
-export interface ChatCompletionSkill extends WebApiSkill {
-  /** Polymorphic discriminator, which specifies the different types this object can be */
-  odatatype: "#Microsoft.Skills.Custom.ChatCompletionSkill";
-  /** API key for authenticating to the model. apiKey and authIdentity cannot both be specified at the same time. */
-  apiKey?: string;
-  /** Common language model parameters that customers can tweak. If omitted, reasonable defaults will be applied. */
-  commonModelParameters?: CommonModelParameters;
-  /** Open-type dictionary for model-specific parameters that should be appended to the chat completions call. Follows Azure AI Foundry’s extensibility pattern. */
-  extraParameters?: { [propertyName: string]: any };
-  /** How extra parameters are handled by Azure AI Foundry. Default is 'error'. */
-  extraParametersBehavior?: ChatCompletionExtraParametersBehavior;
-  /** Determines how the LLM should format its response. Defaults to 'text' response type. */
-  responseFormat?: ChatCompletionResponseFormat;
-}
-
-/** Projection definition for what data to store in Azure Blob. */
-export interface SearchIndexerKnowledgeStoreObjectProjectionSelector
-  extends SearchIndexerKnowledgeStoreBlobProjectionSelector {}
-
-/** Projection definition for what data to store in Azure Files. */
-export interface SearchIndexerKnowledgeStoreFileProjectionSelector
-  extends SearchIndexerKnowledgeStoreBlobProjectionSelector {}
-
-/** Known values of {@link ApiVersion20251101Preview} that the service accepts. */
-export enum KnownApiVersion20251101Preview {
-  /** Api Version '2025-11-01-preview' */
-  TwoThousandTwentyFive1101Preview = "2025-11-01-preview",
-}
-
-/**
- * Defines values for ApiVersion20251101Preview. \
- * {@link KnownApiVersion20251101Preview} can be used interchangeably with ApiVersion20251101Preview,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **2025-11-01-preview**: Api Version '2025-11-01-preview'
- */
-export type ApiVersion20251101Preview = string;
-
-/** Known values of {@link KnowledgeBaseModelKind} that the service accepts. */
-export enum KnownKnowledgeBaseModelKind {
-  /** Use Azure OpenAI models for query planning. */
-  AzureOpenAI = "azureOpenAI",
-}
-
-/**
- * Defines values for KnowledgeBaseModelKind. \
- * {@link KnownKnowledgeBaseModelKind} can be used interchangeably with KnowledgeBaseModelKind,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **azureOpenAI**: Use Azure OpenAI models for query planning.
- */
-export type KnowledgeBaseModelKind = string;
-
-/** Known values of {@link KnowledgeRetrievalReasoningEffortKind} that the service accepts. */
-export enum KnownKnowledgeRetrievalReasoningEffortKind {
-  /** Does not perform any source selections, query planning, or iterative search. */
-  Minimal = "minimal",
-  /** Use low reasoning during retrieval. */
-  Low = "low",
-  /** Use a moderate amount of reasoning during retrieval. */
-  Medium = "medium",
-}
-
-/**
- * Defines values for KnowledgeRetrievalReasoningEffortKind. \
- * {@link KnownKnowledgeRetrievalReasoningEffortKind} can be used interchangeably with KnowledgeRetrievalReasoningEffortKind,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **minimal**: Does not perform any source selections, query planning, or iterative search. \
- * **low**: Use low reasoning during retrieval. \
- * **medium**: Use a moderate amount of reasoning during retrieval.
- */
-export type KnowledgeRetrievalReasoningEffortKind = string;
-
-/** Known values of {@link KnowledgeRetrievalOutputMode} that the service accepts. */
-export enum KnownKnowledgeRetrievalOutputMode {
-  /** Return data from the knowledge sources directly without generative alteration. */
-  ExtractiveData = "extractiveData",
-  /** Synthesize an answer for the response payload. */
-  AnswerSynthesis = "answerSynthesis",
-}
-
-/**
- * Defines values for KnowledgeRetrievalOutputMode. \
- * {@link KnownKnowledgeRetrievalOutputMode} can be used interchangeably with KnowledgeRetrievalOutputMode,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **extractiveData**: Return data from the knowledge sources directly without generative alteration. \
- * **answerSynthesis**: Synthesize an answer for the response payload.
- */
-export type KnowledgeRetrievalOutputMode = string;
-
-/** Known values of {@link KnowledgeSourceKind} that the service accepts. */
-export enum KnownKnowledgeSourceKind {
-  /** A knowledge source that retrieves data from a Search Index. */
-  SearchIndex = "searchIndex",
-  /** A knowledge source that retrieves and ingests data from Azure Blob Storage to a Search Index. */
-  AzureBlob = "azureBlob",
-  /** A knowledge source that retrieves data from the web. */
-  Web = "web",
-  /** A knowledge source that retrieves data from a remote SharePoint endpoint. */
-  RemoteSharePoint = "remoteSharePoint",
-  /** A knowledge source that retrieves and ingests data from SharePoint to a Search Index. */
-  IndexedSharePoint = "indexedSharePoint",
-  /** A knowledge source that retrieves and ingests data from OneLake to a Search Index. */
-  IndexedOneLake = "indexedOneLake",
-}
-
-/**
- * Defines values for KnowledgeSourceKind. \
- * {@link KnownKnowledgeSourceKind} can be used interchangeably with KnowledgeSourceKind,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **searchIndex**: A knowledge source that retrieves data from a Search Index. \
- * **azureBlob**: A knowledge source that retrieves and ingests data from Azure Blob Storage to a Search Index. \
- * **web**: A knowledge source that retrieves data from the web. \
- * **remoteSharePoint**: A knowledge source that retrieves data from a remote SharePoint endpoint. \
- * **indexedSharePoint**: A knowledge source that retrieves and ingests data from SharePoint to a Search Index. \
- * **indexedOneLake**: A knowledge source that retrieves and ingests data from OneLake to a Search Index.
- */
-export type KnowledgeSourceKind = string;
-
-/** Known values of {@link KnowledgeSourceSynchronizationStatus} that the service accepts. */
-export enum KnownKnowledgeSourceSynchronizationStatus {
-  /** The knowledge source is being provisioned. */
-  Creating = "creating",
-  /** The knowledge source is active and synchronization runs are occurring. */
-  Active = "active",
-  /** The knowledge source is being deleted and synchronization is paused. */
-  Deleting = "deleting",
-}
-
-/**
- * Defines values for KnowledgeSourceSynchronizationStatus. \
- * {@link KnownKnowledgeSourceSynchronizationStatus} can be used interchangeably with KnowledgeSourceSynchronizationStatus,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **creating**: The knowledge source is being provisioned. \
- * **active**: The knowledge source is active and synchronization runs are occurring. \
- * **deleting**: The knowledge source is being deleted and synchronization is paused.
- */
-export type KnowledgeSourceSynchronizationStatus = string;
-
-/** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */
-export enum KnownSearchIndexerDataSourceType {
-  /** Indicates an Azure SQL datasource. */
-  AzureSql = "azuresql",
-  /** Indicates a CosmosDB datasource. */
-  CosmosDb = "cosmosdb",
-  /** Indicates an Azure Blob datasource. */
-  AzureBlob = "azureblob",
-  /** Indicates an Azure Table datasource. */
-  AzureTable = "azuretable",
-  /** Indicates a MySql datasource. */
-  MySql = "mysql",
-  /** Indicates an ADLS Gen2 datasource. */
-  AdlsGen2 = "adlsgen2",
-  /** Indicates a Microsoft Fabric OneLake datasource. */
-  OneLake = "onelake",
-  /** Indicates a SharePoint datasource. */
-  SharePoint = "sharepoint",
-}
-
-/**
- * Defines values for SearchIndexerDataSourceType. \
- * {@link KnownSearchIndexerDataSourceType} can be used interchangeably with SearchIndexerDataSourceType,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **azuresql**: Indicates an Azure SQL datasource. \
- * **cosmosdb**: Indicates a CosmosDB datasource. \
- * **azureblob**: Indicates an Azure Blob datasource. \
- * **azuretable**: Indicates an Azure Table datasource. \
- * **mysql**: Indicates a MySql datasource. \
- * **adlsgen2**: Indicates an ADLS Gen2 datasource. \
- * **onelake**: Indicates a Microsoft Fabric OneLake datasource. \
- * **sharepoint**: Indicates a SharePoint datasource.
- */
-export type SearchIndexerDataSourceType = string;
-
-/** Known values of {@link IndexerPermissionOption} that the service accepts. */
-export enum KnownIndexerPermissionOption {
-  /** Indexer to ingest ACL userIds from data source to index. */
-  UserIds = "userIds",
-  /** Indexer to ingest ACL groupIds from data source to index. */
-  GroupIds = "groupIds",
-  /** Indexer to ingest Azure RBAC scope from data source to index. */
-  RbacScope = "rbacScope",
-}
-
-/**
- * Defines values for IndexerPermissionOption. \
- * {@link KnownIndexerPermissionOption} can be used interchangeably with IndexerPermissionOption,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **userIds**: Indexer to ingest ACL userIds from data source to index. \
- * **groupIds**: Indexer to ingest ACL groupIds from data source to index. \
- * **rbacScope**: Indexer to ingest Azure RBAC scope from data source to index.
- */
-export type IndexerPermissionOption = string;
-
-/** Known values of {@link IndexerResyncOption} that the service accepts. */
-export enum KnownIndexerResyncOption {
-  /** Indexer to re-ingest pre-selected permissions data from data source to index. */
-  Permissions = "permissions",
-}
-
-/**
- * Defines values for IndexerResyncOption. \
- * {@link KnownIndexerResyncOption} can be used interchangeably with IndexerResyncOption,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **permissions**: Indexer to re-ingest pre-selected permissions data from data source to index.
- */
-export type IndexerResyncOption = string;
-
-/** Known values of {@link BlobIndexerParsingMode} that the service accepts. */
-export enum KnownBlobIndexerParsingMode {
-  /** Set to default for normal file processing. */
-  Default = "default",
-  /** Set to text to improve indexing performance on plain text files in blob storage. */
-  Text = "text",
-  /** Set to delimitedText when blobs are plain CSV files. */
-  DelimitedText = "delimitedText",
-  /** Set to json to extract structured content from JSON files. */
-  Json = "json",
-  /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */
-  JsonArray = "jsonArray",
-  /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */
-  JsonLines = "jsonLines",
-  /** Set to markdown to extract content from markdown files. */
-  Markdown = "markdown",
-}
-
-/**
- * Defines values for BlobIndexerParsingMode. \
- * {@link KnownBlobIndexerParsingMode} can be used interchangeably with BlobIndexerParsingMode,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **default**: Set to default for normal file processing. \
- * **text**: Set to text to improve indexing performance on plain text files in blob storage. \
- * **delimitedText**: Set to delimitedText when blobs are plain CSV files. \
- * **json**: Set to json to extract structured content from JSON files. \
- * **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents. \
- * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. \
- * **markdown**: Set to markdown to extract content from markdown files.
- */
-export type BlobIndexerParsingMode = string;
-
-/** Known values of {@link MarkdownParsingSubmode} that the service accepts. */
-export enum KnownMarkdownParsingSubmode {
-  /** Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. */
-  OneToMany = "oneToMany",
-  /** Indicates that each markdown file will be parsed into a single search document. */
-  OneToOne = "oneToOne",
-}
-
-/**
- * Defines values for MarkdownParsingSubmode. \
- * {@link KnownMarkdownParsingSubmode} can be used interchangeably with MarkdownParsingSubmode,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **oneToMany**: Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. \
- * **oneToOne**: Indicates that each markdown file will be parsed into a single search document.
- */
-export type MarkdownParsingSubmode = string;
-
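// --- Illustrative aside (not part of the diff): a sketch of how these parsing
// --- modes reach the service through an indexer's parameters. The shape of
// --- the `configuration` bag mirrors the service's camelCase configuration
// --- keys and is an assumption here; it is defined elsewhere in this file.
const indexerParameters = {
  configuration: {
    parsingMode: "markdown", // KnownBlobIndexerParsingMode.Markdown
    markdownParsingSubmode: "oneToMany", // one search document per section
  },
};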
-/** Known values of {@link MarkdownHeaderDepth} that the service accepts. */
-export enum KnownMarkdownHeaderDepth {
-  /** Indicates that headers up to a level of h1 will be considered while grouping markdown content. */
-  H1 = "h1",
-  /** Indicates that headers up to a level of h2 will be considered while grouping markdown content. */
-  H2 = "h2",
-  /** Indicates that headers up to a level of h3 will be considered while grouping markdown content. */
-  H3 = "h3",
-  /** Indicates that headers up to a level of h4 will be considered while grouping markdown content. */
-  H4 = "h4",
-  /** Indicates that headers up to a level of h5 will be considered while grouping markdown content. */
-  H5 = "h5",
-  /** Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. */
-  H6 = "h6",
-}
-
-/**
- * Defines values for MarkdownHeaderDepth. \
- * {@link KnownMarkdownHeaderDepth} can be used interchangeably with MarkdownHeaderDepth,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **h1**: Indicates that headers up to a level of h1 will be considered while grouping markdown content. \
- * **h2**: Indicates that headers up to a level of h2 will be considered while grouping markdown content. \
- * **h3**: Indicates that headers up to a level of h3 will be considered while grouping markdown content. \
- * **h4**: Indicates that headers up to a level of h4 will be considered while grouping markdown content. \
- * **h5**: Indicates that headers up to a level of h5 will be considered while grouping markdown content. \
- * **h6**: Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default.
- */
-export type MarkdownHeaderDepth = string;
-
-/** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */
-export enum KnownBlobIndexerDataToExtract {
-  /** Indexes just the standard blob properties and user-specified metadata. */
-  StorageMetadata = "storageMetadata",
-  /** Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). */
-  AllMetadata = "allMetadata",
-  /** Extracts all metadata and textual content from each blob. */
-  ContentAndMetadata = "contentAndMetadata",
-}
-
-/**
- * Defines values for BlobIndexerDataToExtract. \
- * {@link KnownBlobIndexerDataToExtract} can be used interchangeably with BlobIndexerDataToExtract,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **storageMetadata**: Indexes just the standard blob properties and user-specified metadata. \
- * **allMetadata**: Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). \
- * **contentAndMetadata**: Extracts all metadata and textual content from each blob.
- */
-export type BlobIndexerDataToExtract = string;
-
-/** Known values of {@link BlobIndexerImageAction} that the service accepts. */
-export enum KnownBlobIndexerImageAction {
-  /** Ignores embedded images or image files in the data set. This is the default. */
-  None = "none",
-  /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. */
-  GenerateNormalizedImages = "generateNormalizedImages",
-  /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set. */
-  GenerateNormalizedImagePerPage = "generateNormalizedImagePerPage",
-}
-
-/**
- * Defines values for BlobIndexerImageAction. \
- * {@link KnownBlobIndexerImageAction} can be used interchangeably with BlobIndexerImageAction,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **none**: Ignores embedded images or image files in the data set. This is the default. \
- * **generateNormalizedImages**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. \
- * **generateNormalizedImagePerPage**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set.
- */
-export type BlobIndexerImageAction = string;
-
-/** Known values of {@link BlobIndexerPDFTextRotationAlgorithm} that the service accepts. */
-export enum KnownBlobIndexerPDFTextRotationAlgorithm {
-  /** Leverages normal text extraction. This is the default. */
-  None = "none",
-  /** May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply. */
-  DetectAngles = "detectAngles",
-}
-
-/**
- * Defines values for BlobIndexerPDFTextRotationAlgorithm. \
- * {@link KnownBlobIndexerPDFTextRotationAlgorithm} can be used interchangeably with BlobIndexerPDFTextRotationAlgorithm,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **none**: Leverages normal text extraction. This is the default. \
- * **detectAngles**: May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply.
- */
-export type BlobIndexerPDFTextRotationAlgorithm = string;
-
-/** Known values of {@link IndexerExecutionEnvironment} that the service accepts. */
-export enum KnownIndexerExecutionEnvironment {
-  /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */
-  Standard = "standard",
-  /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
-  Private = "private",
-}
-
-/**
- * Defines values for IndexerExecutionEnvironment. \
- * {@link KnownIndexerExecutionEnvironment} can be used interchangeably with IndexerExecutionEnvironment,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **standard**: Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. \
- * **private**: Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources.
- */
-export type IndexerExecutionEnvironment = string;
-
-/** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */
-export enum KnownIndexerExecutionStatusDetail {
-  /** Indicates that the reset that occurred was for a call to ResetDocs. */
-  ResetDocs = "resetDocs",
-  /** Indicates to selectively resync based on option(s) from data source. */
-  Resync = "resync",
-}
-
-/**
- * Defines values for IndexerExecutionStatusDetail. \
- * {@link KnownIndexerExecutionStatusDetail} can be used interchangeably with IndexerExecutionStatusDetail,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **resetDocs**: Indicates that the reset that occurred was for a call to ResetDocs. \
- * **resync**: Indicates to selectively resync based on option(s) from data source.
- */
-export type IndexerExecutionStatusDetail = string;
-
-/** Known values of {@link IndexingMode} that the service accepts. */
-export enum KnownIndexingMode {
-  /** The indexer is indexing all documents in the datasource. */
-  IndexingAllDocs = "indexingAllDocs",
-  /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */
-  IndexingResetDocs = "indexingResetDocs",
-  /** The indexer is resyncing and indexing selective option(s) from the datasource. */
-  IndexingResync = "indexingResync",
-}
-
-/**
- * Defines values for IndexingMode. \
- * {@link KnownIndexingMode} can be used interchangeably with IndexingMode,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **indexingAllDocs**: The indexer is indexing all documents in the datasource. \
- * **indexingResetDocs**: The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. \
- * **indexingResync**: The indexer is resyncing and indexing selective option(s) from the datasource.
- */
-export type IndexingMode = string;
-
-/** Known values of {@link IndexProjectionMode} that the service accepts. */
-export enum KnownIndexProjectionMode {
-  /** The source document will be skipped from writing into the indexer's target index. */
-  SkipIndexingParentDocuments = "skipIndexingParentDocuments",
-  /** The source document will be written into the indexer's target index. This is the default pattern. */
-  IncludeIndexingParentDocuments = "includeIndexingParentDocuments",
-}
-
-/**
- * Defines values for IndexProjectionMode. \
- * {@link KnownIndexProjectionMode} can be used interchangeably with IndexProjectionMode,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **skipIndexingParentDocuments**: The source document will be skipped from writing into the indexer's target index. \
- * **includeIndexingParentDocuments**: The source document will be written into the indexer's target index. This is the default pattern.
- */
-export type IndexProjectionMode = string;
-
-/** Known values of {@link SearchFieldDataType} that the service accepts. */
-export enum KnownSearchFieldDataType {
-  /** Indicates that a field contains a string. */
-  String = "Edm.String",
-  /** Indicates that a field contains a 32-bit signed integer. */
-  Int32 = "Edm.Int32",
-  /** Indicates that a field contains a 64-bit signed integer. */
-  Int64 = "Edm.Int64",
-  /** Indicates that a field contains an IEEE double-precision floating point number. */
-  Double = "Edm.Double",
-  /** Indicates that a field contains a Boolean value (true or false). */
-  Boolean = "Edm.Boolean",
-  /** Indicates that a field contains a date\/time value, including timezone information. */
-  DateTimeOffset = "Edm.DateTimeOffset",
-  /** Indicates that a field contains a geo-location in terms of longitude and latitude. */
-  GeographyPoint = "Edm.GeographyPoint",
-  /** Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. */
-  Complex = "Edm.ComplexType",
-  /** Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). */
-  Single = "Edm.Single",
-  /** Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). */
-  Half = "Edm.Half",
-  /** Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). */
-  Int16 = "Edm.Int16",
-  /** Indicates that a field contains an 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). */
-  SByte = "Edm.SByte",
-  /** Indicates that a field contains an 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte). */
-  Byte = "Edm.Byte",
-}
-
-/**
- * Defines values for SearchFieldDataType. \
- * {@link KnownSearchFieldDataType} can be used interchangeably with SearchFieldDataType,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **Edm.String**: Indicates that a field contains a string. \
- * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer. \
- * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer. \
- * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number. \
- * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false). \
- * **Edm.DateTimeOffset**: Indicates that a field contains a date\/time value, including timezone information. \
- * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and latitude. \
- * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. \
- * **Edm.Single**: Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). \
- * **Edm.Half**: Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). \
- * **Edm.Int16**: Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). \
- * **Edm.SByte**: Indicates that a field contains an 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). \
- * **Edm.Byte**: Indicates that a field contains an 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte).
- */
-export type SearchFieldDataType = string;
-
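// --- Illustrative aside (not part of the diff): the EDM strings above are the
// --- values a field's `type` accepts; single-valued primitives use them
// --- directly, while multi-valued and vector fields wrap them in
// --- Collection(...). The field shape below is a hedged sketch of a
// --- SearchField-like object, not a definitive definition.
const fields = [
  { name: "id", type: "Edm.String", key: true },
  { name: "rating", type: "Edm.Int32", filterable: true, sortable: true },
  // Edm.Single is only valid inside a collection, per the comments above:
  { name: "embedding", type: "Collection(Edm.Single)", vectorSearchDimensions: 1536 },
];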
-/** Known values of {@link PermissionFilter} that the service accepts. */
-export enum KnownPermissionFilter {
-  /** Field represents user IDs that should be used to filter document access on queries. */
-  UserIds = "userIds",
-  /** Field represents group IDs that should be used to filter document access on queries. */
-  GroupIds = "groupIds",
-  /** Field represents an RBAC scope that should be used to filter document access on queries. */
-  RbacScope = "rbacScope",
-}
-
-/**
- * Defines values for PermissionFilter. \
- * {@link KnownPermissionFilter} can be used interchangeably with PermissionFilter,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **userIds**: Field represents user IDs that should be used to filter document access on queries. \
- * **groupIds**: Field represents group IDs that should be used to filter document access on queries. \
- * **rbacScope**: Field represents an RBAC scope that should be used to filter document access on queries.
- */
-export type PermissionFilter = string;
-
-/** Known values of {@link LexicalAnalyzerName} that the service accepts. */
-export enum KnownLexicalAnalyzerName {
-  /** Microsoft analyzer for Arabic. */
-  ArMicrosoft = "ar.microsoft",
-  /** Lucene analyzer for Arabic. */
-  ArLucene = "ar.lucene",
-  /** Lucene analyzer for Armenian. */
-  HyLucene = "hy.lucene",
-  /** Microsoft analyzer for Bangla. */
-  BnMicrosoft = "bn.microsoft",
-  /** Lucene analyzer for Basque. */
-  EuLucene = "eu.lucene",
-  /** Microsoft analyzer for Bulgarian. */
-  BgMicrosoft = "bg.microsoft",
-  /** Lucene analyzer for Bulgarian. */
-  BgLucene = "bg.lucene",
-  /** Microsoft analyzer for Catalan. */
-  CaMicrosoft = "ca.microsoft",
-  /** Lucene analyzer for Catalan. */
-  CaLucene = "ca.lucene",
-  /** Microsoft analyzer for Chinese (Simplified). */
-  ZhHansMicrosoft = "zh-Hans.microsoft",
-  /** Lucene analyzer for Chinese (Simplified). */
-  ZhHansLucene = "zh-Hans.lucene",
-  /** Microsoft analyzer for Chinese (Traditional). */
-  ZhHantMicrosoft = "zh-Hant.microsoft",
-  /** Lucene analyzer for Chinese (Traditional). */
-  ZhHantLucene = "zh-Hant.lucene",
-  /** Microsoft analyzer for Croatian. */
-  HrMicrosoft = "hr.microsoft",
-  /** Microsoft analyzer for Czech. */
-  CsMicrosoft = "cs.microsoft",
-  /** Lucene analyzer for Czech. */
-  CsLucene = "cs.lucene",
-  /** Microsoft analyzer for Danish. */
-  DaMicrosoft = "da.microsoft",
-  /** Lucene analyzer for Danish. */
-  DaLucene = "da.lucene",
-  /** Microsoft analyzer for Dutch. */
-  NlMicrosoft = "nl.microsoft",
-  /** Lucene analyzer for Dutch. */
-  NlLucene = "nl.lucene",
-  /** Microsoft analyzer for English. */
-  EnMicrosoft = "en.microsoft",
-  /** Lucene analyzer for English. */
-  EnLucene = "en.lucene",
-  /** Microsoft analyzer for Estonian. */
-  EtMicrosoft = "et.microsoft",
-  /** Microsoft analyzer for Finnish. */
-  FiMicrosoft = "fi.microsoft",
-  /** Lucene analyzer for Finnish. */
-  FiLucene = "fi.lucene",
-  /** Microsoft analyzer for French. */
-  FrMicrosoft = "fr.microsoft",
-  /** Lucene analyzer for French. */
-  FrLucene = "fr.lucene",
-  /** Lucene analyzer for Galician. */
-  GlLucene = "gl.lucene",
-  /** Microsoft analyzer for German. */
-  DeMicrosoft = "de.microsoft",
-  /** Lucene analyzer for German. */
-  DeLucene = "de.lucene",
-  /** Microsoft analyzer for Greek. */
-  ElMicrosoft = "el.microsoft",
-  /** Lucene analyzer for Greek. */
-  ElLucene = "el.lucene",
-  /** Microsoft analyzer for Gujarati. */
-  GuMicrosoft = "gu.microsoft",
-  /** Microsoft analyzer for Hebrew. */
-  HeMicrosoft = "he.microsoft",
-  /** Microsoft analyzer for Hindi. */
-  HiMicrosoft = "hi.microsoft",
-  /** Lucene analyzer for Hindi. */
-  HiLucene = "hi.lucene",
-  /** Microsoft analyzer for Hungarian. */
-  HuMicrosoft = "hu.microsoft",
-  /** Lucene analyzer for Hungarian. */
-  HuLucene = "hu.lucene",
-  /** Microsoft analyzer for Icelandic. */
-  IsMicrosoft = "is.microsoft",
-  /** Microsoft analyzer for Indonesian (Bahasa). */
-  IdMicrosoft = "id.microsoft",
-  /** Lucene analyzer for Indonesian. */
-  IdLucene = "id.lucene",
-  /** Lucene analyzer for Irish. */
-  GaLucene = "ga.lucene",
-  /** Microsoft analyzer for Italian. */
-  ItMicrosoft = "it.microsoft",
-  /** Lucene analyzer for Italian. */
-  ItLucene = "it.lucene",
-  /** Microsoft analyzer for Japanese. */
-  JaMicrosoft = "ja.microsoft",
-  /** Lucene analyzer for Japanese. */
-  JaLucene = "ja.lucene",
-  /** Microsoft analyzer for Kannada. */
-  KnMicrosoft = "kn.microsoft",
-  /** Microsoft analyzer for Korean. */
-  KoMicrosoft = "ko.microsoft",
-  /** Lucene analyzer for Korean. */
-  KoLucene = "ko.lucene",
-  /** Microsoft analyzer for Latvian. */
-  LvMicrosoft = "lv.microsoft",
-  /** Lucene analyzer for Latvian. */
-  LvLucene = "lv.lucene",
-  /** Microsoft analyzer for Lithuanian. */
-  LtMicrosoft = "lt.microsoft",
-  /** Microsoft analyzer for Malayalam. */
-  MlMicrosoft = "ml.microsoft",
-  /** Microsoft analyzer for Malay (Latin). */
-  MsMicrosoft = "ms.microsoft",
-  /** Microsoft analyzer for Marathi. */
-  MrMicrosoft = "mr.microsoft",
-  /** Microsoft analyzer for Norwegian (Bokmål). */
-  NbMicrosoft = "nb.microsoft",
-  /** Lucene analyzer for Norwegian. */
-  NoLucene = "no.lucene",
-  /** Lucene analyzer for Persian. */
-  FaLucene = "fa.lucene",
-  /** Microsoft analyzer for Polish. */
-  PlMicrosoft = "pl.microsoft",
-  /** Lucene analyzer for Polish. */
-  PlLucene = "pl.lucene",
-  /** Microsoft analyzer for Portuguese (Brazil). */
-  PtBrMicrosoft = "pt-BR.microsoft",
-  /** Lucene analyzer for Portuguese (Brazil). */
-  PtBrLucene = "pt-BR.lucene",
-  /** Microsoft analyzer for Portuguese (Portugal). */
-  PtPtMicrosoft = "pt-PT.microsoft",
-  /** Lucene analyzer for Portuguese (Portugal). */
-  PtPtLucene = "pt-PT.lucene",
-  /** Microsoft analyzer for Punjabi. */
-  PaMicrosoft = "pa.microsoft",
-  /** Microsoft analyzer for Romanian. */
-  RoMicrosoft = "ro.microsoft",
-  /** Lucene analyzer for Romanian. */
-  RoLucene = "ro.lucene",
-  /** Microsoft analyzer for Russian. */
-  RuMicrosoft = "ru.microsoft",
-  /** Lucene analyzer for Russian. */
-  RuLucene = "ru.lucene",
-  /** Microsoft analyzer for Serbian (Cyrillic). */
-  SrCyrillicMicrosoft = "sr-cyrillic.microsoft",
-  /** Microsoft analyzer for Serbian (Latin). */
-  SrLatinMicrosoft = "sr-latin.microsoft",
-  /** Microsoft analyzer for Slovak. */
-  SkMicrosoft = "sk.microsoft",
-  /** Microsoft analyzer for Slovenian. */
-  SlMicrosoft = "sl.microsoft",
-  /** Microsoft analyzer for Spanish. */
-  EsMicrosoft = "es.microsoft",
-  /** Lucene analyzer for Spanish. */
-  EsLucene = "es.lucene",
-  /** Microsoft analyzer for Swedish. */
-  SvMicrosoft = "sv.microsoft",
-  /** Lucene analyzer for Swedish. */
-  SvLucene = "sv.lucene",
-  /** Microsoft analyzer for Tamil. */
-  TaMicrosoft = "ta.microsoft",
-  /** Microsoft analyzer for Telugu. */
-  TeMicrosoft = "te.microsoft",
-  /** Microsoft analyzer for Thai. */
-  ThMicrosoft = "th.microsoft",
-  /** Lucene analyzer for Thai. */
-  ThLucene = "th.lucene",
-  /** Microsoft analyzer for Turkish. */
-  TrMicrosoft = "tr.microsoft",
-  /** Lucene analyzer for Turkish. */
-  TrLucene = "tr.lucene",
-  /** Microsoft analyzer for Ukrainian. */
-  UkMicrosoft = "uk.microsoft",
-  /** Microsoft analyzer for Urdu. */
-  UrMicrosoft = "ur.microsoft",
-  /** Microsoft analyzer for Vietnamese. */
-  ViMicrosoft = "vi.microsoft",
-  /** Standard Lucene analyzer. */
-  StandardLucene = "standard.lucene",
-  /** Standard ASCII Folding Lucene analyzer. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers */
-  StandardAsciiFoldingLucene = "standardasciifolding.lucene",
-  /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html */
-  Keyword = "keyword",
-  /** Flexibly separates text into terms via a regular expression pattern. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html */
-  Pattern = "pattern",
-  /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html */
-  Simple = "simple",
-  /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html */
-  Stop = "stop",
-  /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
-  Whitespace = "whitespace",
-}
-
-/**
- * Defines values for LexicalAnalyzerName. \
- * {@link KnownLexicalAnalyzerName} can be used interchangeably with LexicalAnalyzerName,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **ar.microsoft**: Microsoft analyzer for Arabic. \
- * **ar.lucene**: Lucene analyzer for Arabic. \
- * **hy.lucene**: Lucene analyzer for Armenian. \
- * **bn.microsoft**: Microsoft analyzer for Bangla. \
- * **eu.lucene**: Lucene analyzer for Basque. \
- * **bg.microsoft**: Microsoft analyzer for Bulgarian. \
- * **bg.lucene**: Lucene analyzer for Bulgarian. \
- * **ca.microsoft**: Microsoft analyzer for Catalan. \
- * **ca.lucene**: Lucene analyzer for Catalan. \
- * **zh-Hans.microsoft**: Microsoft analyzer for Chinese (Simplified). \
- * **zh-Hans.lucene**: Lucene analyzer for Chinese (Simplified). \
- * **zh-Hant.microsoft**: Microsoft analyzer for Chinese (Traditional). \
- * **zh-Hant.lucene**: Lucene analyzer for Chinese (Traditional). \
- * **hr.microsoft**: Microsoft analyzer for Croatian. \
- * **cs.microsoft**: Microsoft analyzer for Czech. \
- * **cs.lucene**: Lucene analyzer for Czech. \
- * **da.microsoft**: Microsoft analyzer for Danish. \
- * **da.lucene**: Lucene analyzer for Danish. \
- * **nl.microsoft**: Microsoft analyzer for Dutch. \
- * **nl.lucene**: Lucene analyzer for Dutch. \
- * **en.microsoft**: Microsoft analyzer for English. \
- * **en.lucene**: Lucene analyzer for English. \
- * **et.microsoft**: Microsoft analyzer for Estonian. \
- * **fi.microsoft**: Microsoft analyzer for Finnish. \
- * **fi.lucene**: Lucene analyzer for Finnish. \
- * **fr.microsoft**: Microsoft analyzer for French. \
- * **fr.lucene**: Lucene analyzer for French. \
- * **gl.lucene**: Lucene analyzer for Galician. \
- * **de.microsoft**: Microsoft analyzer for German. \
- * **de.lucene**: Lucene analyzer for German. \
- * **el.microsoft**: Microsoft analyzer for Greek. \
- * **el.lucene**: Lucene analyzer for Greek. \
- * **gu.microsoft**: Microsoft analyzer for Gujarati. \
- * **he.microsoft**: Microsoft analyzer for Hebrew. \
- * **hi.microsoft**: Microsoft analyzer for Hindi. \
- * **hi.lucene**: Lucene analyzer for Hindi. \
- * **hu.microsoft**: Microsoft analyzer for Hungarian. \
- * **hu.lucene**: Lucene analyzer for Hungarian. \
- * **is.microsoft**: Microsoft analyzer for Icelandic. \
- * **id.microsoft**: Microsoft analyzer for Indonesian (Bahasa). \
- * **id.lucene**: Lucene analyzer for Indonesian. \
- * **ga.lucene**: Lucene analyzer for Irish. \
- * **it.microsoft**: Microsoft analyzer for Italian. \
- * **it.lucene**: Lucene analyzer for Italian. \
- * **ja.microsoft**: Microsoft analyzer for Japanese. \
- * **ja.lucene**: Lucene analyzer for Japanese. \
- * **kn.microsoft**: Microsoft analyzer for Kannada. \
- * **ko.microsoft**: Microsoft analyzer for Korean. \
- * **ko.lucene**: Lucene analyzer for Korean. \
- * **lv.microsoft**: Microsoft analyzer for Latvian. \
- * **lv.lucene**: Lucene analyzer for Latvian. \
- * **lt.microsoft**: Microsoft analyzer for Lithuanian. \
- * **ml.microsoft**: Microsoft analyzer for Malayalam. \
- * **ms.microsoft**: Microsoft analyzer for Malay (Latin). \
- * **mr.microsoft**: Microsoft analyzer for Marathi. \
- * **nb.microsoft**: Microsoft analyzer for Norwegian (Bokmål). \
- * **no.lucene**: Lucene analyzer for Norwegian. \
- * **fa.lucene**: Lucene analyzer for Persian. \
- * **pl.microsoft**: Microsoft analyzer for Polish. \
- * **pl.lucene**: Lucene analyzer for Polish. \
- * **pt-BR.microsoft**: Microsoft analyzer for Portuguese (Brazil). \
- * **pt-BR.lucene**: Lucene analyzer for Portuguese (Brazil). \
- * **pt-PT.microsoft**: Microsoft analyzer for Portuguese (Portugal). \
- * **pt-PT.lucene**: Lucene analyzer for Portuguese (Portugal). \
- * **pa.microsoft**: Microsoft analyzer for Punjabi. \
- * **ro.microsoft**: Microsoft analyzer for Romanian. \
- * **ro.lucene**: Lucene analyzer for Romanian. \
- * **ru.microsoft**: Microsoft analyzer for Russian. \
- * **ru.lucene**: Lucene analyzer for Russian. \
- * **sr-cyrillic.microsoft**: Microsoft analyzer for Serbian (Cyrillic). \
- * **sr-latin.microsoft**: Microsoft analyzer for Serbian (Latin). \
- * **sk.microsoft**: Microsoft analyzer for Slovak. \
- * **sl.microsoft**: Microsoft analyzer for Slovenian. \
- * **es.microsoft**: Microsoft analyzer for Spanish. \
- * **es.lucene**: Lucene analyzer for Spanish. \
- * **sv.microsoft**: Microsoft analyzer for Swedish. \
- * **sv.lucene**: Lucene analyzer for Swedish. \
- * **ta.microsoft**: Microsoft analyzer for Tamil. \
- * **te.microsoft**: Microsoft analyzer for Telugu. \
- * **th.microsoft**: Microsoft analyzer for Thai. \
- * **th.lucene**: Lucene analyzer for Thai. \
- * **tr.microsoft**: Microsoft analyzer for Turkish. \
- * **tr.lucene**: Lucene analyzer for Turkish. \
- * **uk.microsoft**: Microsoft analyzer for Ukrainian. \
- * **ur.microsoft**: Microsoft analyzer for Urdu. \
- * **vi.microsoft**: Microsoft analyzer for Vietnamese. \
- * **standard.lucene**: Standard Lucene analyzer. \
- * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https:\/\/learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers \
- * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html \
- * **pattern**: Flexibly separates text into terms via a regular expression pattern. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html \
- * **simple**: Divides text at non-letters and converts them to lower case. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html \
- * **stop**: Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html \
- * **whitespace**: An analyzer that uses the whitespace tokenizer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html
- */
-export type LexicalAnalyzerName = string;
-
-/** Known values of {@link LexicalNormalizerName} that the service accepts. */
-export enum KnownLexicalNormalizerName {
-  /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
-  AsciiFolding = "asciifolding",
-  /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
-  Elision = "elision",
-  /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
-  Lowercase = "lowercase",
-  /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
-  Standard = "standard",
-  /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
-  Uppercase = "uppercase",
-}
-
-/**
- * Defines values for LexicalNormalizerName. \
- * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \
- * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \
- * **lowercase**: Normalizes token text to lowercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \
- * **standard**: Standard normalizer, which consists of lowercase and asciifolding. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \
- * **uppercase**: Normalizes token text to uppercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html
- */
-export type LexicalNormalizerName = string;
-
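// --- Illustrative aside (not part of the diff): normalizers apply light,
// --- tokenizer-free transforms to filterable/sortable/facetable string
// --- fields, e.g. to make $filter comparisons case-insensitive. The field
// --- property name (`normalizerName` in the public SDK) is an assumption.
const categoryField = {
  name: "category",
  type: "Edm.String",
  filterable: true,
  normalizerName: "lowercase", // KnownLexicalNormalizerName.Lowercase
};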
\ - * {@link KnownVectorSearchVectorizerKind} can be used interchangeably with VectorSearchVectorizerKind, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \ - * **customWebApi**: Generate embeddings using a custom web endpoint at query time. \ - * **aiServicesVision**: Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. \ - * **aml**: Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog at query time. - */ -export type VectorSearchVectorizerKind = string; - -/** Known values of {@link VectorSearchCompressionKind} that the service accepts. */ -export enum KnownVectorSearchCompressionKind { - /** Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */ - ScalarQuantization = "scalarQuantization", - /** Binary Quantization, a type of compression method. In binary quantization, the original vectors values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. */ - BinaryQuantization = "binaryQuantization", -} - -/** - * Defines values for VectorSearchCompressionKind. \ - * {@link KnownVectorSearchCompressionKind} can be used interchangeably with VectorSearchCompressionKind, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. \ - * **binaryQuantization**: Binary Quantization, a type of compression method. In binary quantization, the original vectors values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. - */ -export type VectorSearchCompressionKind = string; - -/** Known values of {@link VectorSearchCompressionRescoreStorageMethod} that the service accepts. */ -export enum KnownVectorSearchCompressionRescoreStorageMethod { - /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. */ - PreserveOriginals = "preserveOriginals", - /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */ - DiscardOriginals = "discardOriginals", -} - -/** - * Defines values for VectorSearchCompressionRescoreStorageMethod. \ - * {@link KnownVectorSearchCompressionRescoreStorageMethod} can be used interchangeably with VectorSearchCompressionRescoreStorageMethod, - * this enum contains the known values that the service supports. 
- * ### Known values supported by the service - * **preserveOriginals**: This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. \ - * **discardOriginals**: This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. - */ -export type VectorSearchCompressionRescoreStorageMethod = string; - -/** Known values of {@link SearchIndexPermissionFilterOption} that the service accepts. */ -export enum KnownSearchIndexPermissionFilterOption { - /** Enabled */ - Enabled = "enabled", - /** Disabled */ - Disabled = "disabled", -} - -/** - * Defines values for SearchIndexPermissionFilterOption. \ - * {@link KnownSearchIndexPermissionFilterOption} can be used interchangeably with SearchIndexPermissionFilterOption, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **enabled** \ - * **disabled** - */ -export type SearchIndexPermissionFilterOption = string; - -/** Known values of {@link AzureOpenAIModelName} that the service accepts. */ -export enum KnownAzureOpenAIModelName { - /** TextEmbeddingAda002 */ - TextEmbeddingAda002 = "text-embedding-ada-002", - /** TextEmbedding3Large */ - TextEmbedding3Large = "text-embedding-3-large", - /** TextEmbedding3Small */ - TextEmbedding3Small = "text-embedding-3-small", - /** Gpt4O */ - Gpt4O = "gpt-4o", - /** Gpt4OMini */ - Gpt4OMini = "gpt-4o-mini", - /** Gpt41 */ - Gpt41 = "gpt-4.1", - /** Gpt41Mini */ - Gpt41Mini = "gpt-4.1-mini", - /** Gpt41Nano */ - Gpt41Nano = "gpt-4.1-nano", - /** Gpt5 */ - Gpt5 = "gpt-5", - /** Gpt5Mini */ - Gpt5Mini = "gpt-5-mini", - /** Gpt5Nano */ - Gpt5Nano = "gpt-5-nano", -} - -/** - * Defines values for AzureOpenAIModelName. \ - * {@link KnownAzureOpenAIModelName} can be used interchangeably with AzureOpenAIModelName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **text-embedding-ada-002** \ - * **text-embedding-3-large** \ - * **text-embedding-3-small** \ - * **gpt-4o** \ - * **gpt-4o-mini** \ - * **gpt-4.1** \ - * **gpt-4.1-mini** \ - * **gpt-4.1-nano** \ - * **gpt-5** \ - * **gpt-5-mini** \ - * **gpt-5-nano** - */ -export type AzureOpenAIModelName = string; - -/** Known values of {@link KnowledgeSourceIngestionPermissionOption} that the service accepts. */ -export enum KnownKnowledgeSourceIngestionPermissionOption { - /** Ingest explicit user identifiers alongside document content. */ - UserIds = "userIds", - /** Ingest group identifiers alongside document content. */ - GroupIds = "groupIds", - /** Ingest RBAC scope information alongside document content. */ - RbacScope = "rbacScope", -} - -/** - * Defines values for KnowledgeSourceIngestionPermissionOption. \ - * {@link KnownKnowledgeSourceIngestionPermissionOption} can be used interchangeably with KnowledgeSourceIngestionPermissionOption, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **userIds**: Ingest explicit user identifiers alongside document content. \ - * **groupIds**: Ingest group identifiers alongside document content. \ - * **rbacScope**: Ingest RBAC scope information alongside document content. 
- */ -export type KnowledgeSourceIngestionPermissionOption = string; - -/** Known values of {@link KnowledgeSourceContentExtractionMode} that the service accepts. */ -export enum KnownKnowledgeSourceContentExtractionMode { - /** Extracts only essential metadata while deferring most content processing. */ - Minimal = "minimal", - /** Performs the full default content extraction pipeline. */ - Standard = "standard", -} - -/** - * Defines values for KnowledgeSourceContentExtractionMode. \ - * {@link KnownKnowledgeSourceContentExtractionMode} can be used interchangeably with KnowledgeSourceContentExtractionMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **minimal**: Extracts only essential metadata while deferring most content processing. \ - * **standard**: Performs the full default content extraction pipeline. - */ -export type KnowledgeSourceContentExtractionMode = string; - -/** Known values of {@link IndexedSharePointContainerName} that the service accepts. */ -export enum KnownIndexedSharePointContainerName { - /** Index content from the site's default document library. */ - DefaultSiteLibrary = "defaultSiteLibrary", - /** Index content from every document library in the site. */ - AllSiteLibraries = "allSiteLibraries", - /** Index only content that matches the query specified in the knowledge source. */ - UseQuery = "useQuery", -} - -/** - * Defines values for IndexedSharePointContainerName. \ - * {@link KnownIndexedSharePointContainerName} can be used interchangeably with IndexedSharePointContainerName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **defaultSiteLibrary**: Index content from the site's default document library. \ - * **allSiteLibraries**: Index content from every document library in the site. \ - * **useQuery**: Index only content that matches the query specified in the knowledge source. - */ -export type IndexedSharePointContainerName = string; - -/** Known values of {@link TokenFilterName} that the service accepts. */ -export enum KnownTokenFilterName { - /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */ - ArabicNormalization = "arabic_normalization", - /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */ - Apostrophe = "apostrophe", - /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */ - AsciiFolding = "asciifolding", - /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */ - CjkBigram = "cjk_bigram", - /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */ - CjkWidth = "cjk_width", - /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */ - Classic = "classic", - /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */ - CommonGram = "common_grams", - /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */ - EdgeNGram = "edgeNGram_v2", - /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */ - Elision = "elision", - /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */ - GermanNormalization = "german_normalization", - /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */ - HindiNormalization = "hindi_normalization", - /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */ - IndicNormalization = "indic_normalization", - /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */ - KeywordRepeat = "keyword_repeat", - /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */ - KStem = "kstem", - /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */ - Length = "length", - /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */ - Limit = "limit", - /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */ - Lowercase = "lowercase", - /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */ - NGram = "nGram_v2", - /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */ - PersianNormalization = "persian_normalization", - /** Create tokens for phonetic matches. 
See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */ - Phonetic = "phonetic", - /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */ - PorterStem = "porter_stem", - /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */ - Reverse = "reverse", - /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */ - ScandinavianNormalization = "scandinavian_normalization", - /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */ - ScandinavianFoldingNormalization = "scandinavian_folding", - /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */ - Shingle = "shingle", - /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */ - Snowball = "snowball", - /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */ - SoraniNormalization = "sorani_normalization", - /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */ - Stemmer = "stemmer", - /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */ - Stopwords = "stopwords", - /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */ - Trim = "trim", - /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */ - Truncate = "truncate", - /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */ - Unique = "unique", - /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */ - Uppercase = "uppercase", - /** Splits words into subwords and performs optional transformations on subword groups. */ - WordDelimiter = "word_delimiter", -} - -/** - * Defines values for TokenFilterName. \ - * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html \ - * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html \ - * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \ - * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html \ - * **cjk_width**: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html \ - * **classic**: Removes English possessives, and dots from acronyms. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html \ - * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html \ - * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html \ - * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \ - * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html \ - * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html \ - * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html \ - * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html \ - * **kstem**: A high-performance kstem filter for English. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html \ - * **length**: Removes words that are too long or too short. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html \ - * **limit**: Limits the number of tokens while indexing. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html \ - * **lowercase**: Normalizes token text to lower case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \ - * **nGram_v2**: Generates n-grams of the given size(s). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html \ - * **persian_normalization**: Applies normalization for Persian. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html \ - * **phonetic**: Create tokens for phonetic matches. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html \ - * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http:\/\/tartarus.org\/~martin\/PorterStemmer \ - * **reverse**: Reverses the token string. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \ - * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html \ - * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html \ - * **shingle**: Creates combinations of tokens as a single token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html \ - * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html \ - * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html \ - * **stemmer**: Language specific stemming filter. See https:\/\/learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters \ - * **stopwords**: Removes stop words from a token stream. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html \ - * **trim**: Trims leading and trailing whitespace from tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html \ - * **truncate**: Truncates the terms to a specific length. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html \ - * **unique**: Filters out tokens with same text as the previous token. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html \ - * **uppercase**: Normalizes token text to upper case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html \ - * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups. - */ -export type TokenFilterName = string; - -/** Known values of {@link CharFilterName} that the service accepts. */ -export enum KnownCharFilterName { - /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */ - HtmlStrip = "html_strip", -} - -/** - * Defines values for CharFilterName. \ - * {@link KnownCharFilterName} can be used interchangeably with CharFilterName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **html_strip**: A character filter that attempts to strip out HTML constructs. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html - */ -export type CharFilterName = string; - -/** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */ -export enum KnownVectorSearchAlgorithmMetric { - /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */ - Cosine = "cosine", - /** Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. */ - Euclidean = "euclidean", - /** Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. */ - DotProduct = "dotProduct", - /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */ - Hamming = "hamming", -} - -/** - * Defines values for VectorSearchAlgorithmMetric. \ - * {@link KnownVectorSearchAlgorithmMetric} can be used interchangeably with VectorSearchAlgorithmMetric, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **cosine**: Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. \ - * **euclidean**: Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. \ - * **dotProduct**: Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. \ - * **hamming**: Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. - */ -export type VectorSearchAlgorithmMetric = string; - -/** Known values of {@link VectorSearchCompressionTarget} that the service accepts. */ -export enum KnownVectorSearchCompressionTarget { - /** Int8 */ - Int8 = "int8", -} - -/** - * Defines values for VectorSearchCompressionTarget. 
\ - * {@link KnownVectorSearchCompressionTarget} can be used interchangeably with VectorSearchCompressionTarget, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **int8** - */ -export type VectorSearchCompressionTarget = string; - -/** Known values of {@link AIFoundryModelCatalogName} that the service accepts. */ -export enum KnownAIFoundryModelCatalogName { - /** OpenAIClipImageTextEmbeddingsVitBasePatch32 */ - OpenAIClipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", - /** OpenAIClipImageTextEmbeddingsViTLargePatch14336 */ - OpenAIClipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336", - /** FacebookDinoV2ImageEmbeddingsViTBase */ - FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base", - /** FacebookDinoV2ImageEmbeddingsViTGiant */ - FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant", - /** CohereEmbedV3English */ - CohereEmbedV3English = "Cohere-embed-v3-english", - /** CohereEmbedV3Multilingual */ - CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual", - /** Cohere embed v4 model for generating embeddings from both text and images. */ - CohereEmbedV4 = "Cohere-embed-v4", -} - -/** - * Defines values for AIFoundryModelCatalogName. \ - * {@link KnownAIFoundryModelCatalogName} can be used interchangeably with AIFoundryModelCatalogName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32** \ - * **OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336** \ - * **Facebook-DinoV2-Image-Embeddings-ViT-Base** \ - * **Facebook-DinoV2-Image-Embeddings-ViT-Giant** \ - * **Cohere-embed-v3-english** \ - * **Cohere-embed-v3-multilingual** \ - * **Cohere-embed-v4**: Cohere embed v4 model for generating embeddings from both text and images. - */ -export type AIFoundryModelCatalogName = string; - -/** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */ -export enum KnownKeyPhraseExtractionSkillLanguage { - /** Danish */ - Da = "da", - /** Dutch */ - Nl = "nl", - /** English */ - En = "en", - /** Finnish */ - Fi = "fi", - /** French */ - Fr = "fr", - /** German */ - De = "de", - /** Italian */ - It = "it", - /** Japanese */ - Ja = "ja", - /** Korean */ - Ko = "ko", - /** Norwegian (Bokmaal) */ - No = "no", - /** Polish */ - Pl = "pl", - /** Portuguese (Portugal) */ - PtPT = "pt-PT", - /** Portuguese (Brazil) */ - PtBR = "pt-BR", - /** Russian */ - Ru = "ru", - /** Spanish */ - Es = "es", - /** Swedish */ - Sv = "sv", -} - -/** - * Defines values for KeyPhraseExtractionSkillLanguage. \ - * {@link KnownKeyPhraseExtractionSkillLanguage} can be used interchangeably with KeyPhraseExtractionSkillLanguage, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **da**: Danish \ - * **nl**: Dutch \ - * **en**: English \ - * **fi**: Finnish \ - * **fr**: French \ - * **de**: German \ - * **it**: Italian \ - * **ja**: Japanese \ - * **ko**: Korean \ - * **no**: Norwegian (Bokmaal) \ - * **pl**: Polish \ - * **pt-PT**: Portuguese (Portugal) \ - * **pt-BR**: Portuguese (Brazil) \ - * **ru**: Russian \ - * **es**: Spanish \ - * **sv**: Swedish - */ -export type KeyPhraseExtractionSkillLanguage = string; - -/** Known values of {@link OcrSkillLanguage} that the service accepts. 
*/ -export enum KnownOcrSkillLanguage { - /** Afrikaans */ - Af = "af", - /** Albanian */ - Sq = "sq", - /** Angika (Devanagiri) */ - Anp = "anp", - /** Arabic */ - Ar = "ar", - /** Asturian */ - Ast = "ast", - /** Awadhi-Hindi (Devanagiri) */ - Awa = "awa", - /** Azerbaijani (Latin) */ - Az = "az", - /** Bagheli */ - Bfy = "bfy", - /** Basque */ - Eu = "eu", - /** Belarusian (Cyrillic and Latin) */ - Be = "be", - /** Belarusian (Cyrillic) */ - BeCyrl = "be-cyrl", - /** Belarusian (Latin) */ - BeLatn = "be-latn", - /** Bhojpuri-Hindi (Devanagiri) */ - Bho = "bho", - /** Bislama */ - Bi = "bi", - /** Bodo (Devanagiri) */ - Brx = "brx", - /** Bosnian Latin */ - Bs = "bs", - /** Brajbha */ - Bra = "bra", - /** Breton */ - Br = "br", - /** Bulgarian */ - Bg = "bg", - /** Bundeli */ - Bns = "bns", - /** Buryat (Cyrillic) */ - Bua = "bua", - /** Catalan */ - Ca = "ca", - /** Cebuano */ - Ceb = "ceb", - /** Chamling */ - Rab = "rab", - /** Chamorro */ - Ch = "ch", - /** Chhattisgarhi (Devanagiri) */ - Hne = "hne", - /** Chinese Simplified */ - ZhHans = "zh-Hans", - /** Chinese Traditional */ - ZhHant = "zh-Hant", - /** Cornish */ - Kw = "kw", - /** Corsican */ - Co = "co", - /** Crimean Tatar (Latin) */ - Crh = "crh", - /** Croatian */ - Hr = "hr", - /** Czech */ - Cs = "cs", - /** Danish */ - Da = "da", - /** Dari */ - Prs = "prs", - /** Dhimal (Devanagiri) */ - Dhi = "dhi", - /** Dogri (Devanagiri) */ - Doi = "doi", - /** Dutch */ - Nl = "nl", - /** English */ - En = "en", - /** Erzya (Cyrillic) */ - Myv = "myv", - /** Estonian */ - Et = "et", - /** Faroese */ - Fo = "fo", - /** Fijian */ - Fj = "fj", - /** Filipino */ - Fil = "fil", - /** Finnish */ - Fi = "fi", - /** French */ - Fr = "fr", - /** Frulian */ - Fur = "fur", - /** Gagauz (Latin) */ - Gag = "gag", - /** Galician */ - Gl = "gl", - /** German */ - De = "de", - /** Gilbertese */ - Gil = "gil", - /** Gondi (Devanagiri) */ - Gon = "gon", - /** Greek */ - El = "el", - /** Greenlandic */ - Kl = "kl", - /** Gurung (Devanagiri) */ - Gvr = "gvr", - /** Haitian Creole */ - Ht = "ht", - /** Halbi (Devanagiri) */ - Hlb = "hlb", - /** Hani */ - Hni = "hni", - /** Haryanvi */ - Bgc = "bgc", - /** Hawaiian */ - Haw = "haw", - /** Hindi */ - Hi = "hi", - /** Hmong Daw (Latin) */ - Mww = "mww", - /** Ho (Devanagiri) */ - Hoc = "hoc", - /** Hungarian */ - Hu = "hu", - /** Icelandic */ - Is = "is", - /** Inari Sami */ - Smn = "smn", - /** Indonesian */ - Id = "id", - /** Interlingua */ - Ia = "ia", - /** Inuktitut (Latin) */ - Iu = "iu", - /** Irish */ - Ga = "ga", - /** Italian */ - It = "it", - /** Japanese */ - Ja = "ja", - /** Jaunsari (Devanagiri) */ - Jns = "Jns", - /** Javanese */ - Jv = "jv", - /** Kabuverdianu */ - Kea = "kea", - /** Kachin (Latin) */ - Kac = "kac", - /** Kangri (Devanagiri) */ - Xnr = "xnr", - /** Karachay-Balkar */ - Krc = "krc", - /** Kara-Kalpak (Cyrillic) */ - KaaCyrl = "kaa-cyrl", - /** Kara-Kalpak (Latin) */ - Kaa = "kaa", - /** Kashubian */ - Csb = "csb", - /** Kazakh (Cyrillic) */ - KkCyrl = "kk-cyrl", - /** Kazakh (Latin) */ - KkLatn = "kk-latn", - /** Khaling */ - Klr = "klr", - /** Khasi */ - Kha = "kha", - /** K'iche' */ - Quc = "quc", - /** Korean */ - Ko = "ko", - /** Korku */ - Kfq = "kfq", - /** Koryak */ - Kpy = "kpy", - /** Kosraean */ - Kos = "kos", - /** Kumyk (Cyrillic) */ - Kum = "kum", - /** Kurdish (Arabic) */ - KuArab = "ku-arab", - /** Kurdish (Latin) */ - KuLatn = "ku-latn", - /** Kurukh (Devanagiri) */ - Kru = "kru", - /** Kyrgyz (Cyrillic) */ - Ky = "ky", - /** Lakota */ - Lkt = "lkt", - /** 
Latin */ - La = "la", - /** Lithuanian */ - Lt = "lt", - /** Lower Sorbian */ - Dsb = "dsb", - /** Lule Sami */ - Smj = "smj", - /** Luxembourgish */ - Lb = "lb", - /** Mahasu Pahari (Devanagiri) */ - Bfz = "bfz", - /** Malay (Latin) */ - Ms = "ms", - /** Maltese */ - Mt = "mt", - /** Malto (Devanagiri) */ - Kmj = "kmj", - /** Manx */ - Gv = "gv", - /** Maori */ - Mi = "mi", - /** Marathi */ - Mr = "mr", - /** Mongolian (Cyrillic) */ - Mn = "mn", - /** Montenegrin (Cyrillic) */ - CnrCyrl = "cnr-cyrl", - /** Montenegrin (Latin) */ - CnrLatn = "cnr-latn", - /** Neapolitan */ - Nap = "nap", - /** Nepali */ - Ne = "ne", - /** Niuean */ - Niu = "niu", - /** Nogay */ - Nog = "nog", - /** Northern Sami (Latin) */ - Sme = "sme", - /** Norwegian */ - Nb = "nb", - /** Norwegian */ - No = "no", - /** Occitan */ - Oc = "oc", - /** Ossetic */ - Os = "os", - /** Pashto */ - Ps = "ps", - /** Persian */ - Fa = "fa", - /** Polish */ - Pl = "pl", - /** Portuguese */ - Pt = "pt", - /** Punjabi (Arabic) */ - Pa = "pa", - /** Ripuarian */ - Ksh = "ksh", - /** Romanian */ - Ro = "ro", - /** Romansh */ - Rm = "rm", - /** Russian */ - Ru = "ru", - /** Sadri (Devanagiri) */ - Sck = "sck", - /** Samoan (Latin) */ - Sm = "sm", - /** Sanskrit (Devanagiri) */ - Sa = "sa", - /** Santali (Devanagiri) */ - Sat = "sat", - /** Scots */ - Sco = "sco", - /** Scottish Gaelic */ - Gd = "gd", - /** Serbian (Latin) */ - Sr = "sr", - /** Serbian (Cyrillic) */ - SrCyrl = "sr-Cyrl", - /** Serbian (Latin) */ - SrLatn = "sr-Latn", - /** Sherpa (Devanagiri) */ - Xsr = "xsr", - /** Sirmauri (Devanagiri) */ - Srx = "srx", - /** Skolt Sami */ - Sms = "sms", - /** Slovak */ - Sk = "sk", - /** Slovenian */ - Sl = "sl", - /** Somali (Arabic) */ - So = "so", - /** Southern Sami */ - Sma = "sma", - /** Spanish */ - Es = "es", - /** Swahili (Latin) */ - Sw = "sw", - /** Swedish */ - Sv = "sv", - /** Tajik (Cyrillic) */ - Tg = "tg", - /** Tatar (Latin) */ - Tt = "tt", - /** Tetum */ - Tet = "tet", - /** Thangmi */ - Thf = "thf", - /** Tongan */ - To = "to", - /** Turkish */ - Tr = "tr", - /** Turkmen (Latin) */ - Tk = "tk", - /** Tuvan */ - Tyv = "tyv", - /** Upper Sorbian */ - Hsb = "hsb", - /** Urdu */ - Ur = "ur", - /** Uyghur (Arabic) */ - Ug = "ug", - /** Uzbek (Arabic) */ - UzArab = "uz-arab", - /** Uzbek (Cyrillic) */ - UzCyrl = "uz-cyrl", - /** Uzbek (Latin) */ - Uz = "uz", - /** Volapük */ - Vo = "vo", - /** Walser */ - Wae = "wae", - /** Welsh */ - Cy = "cy", - /** Western Frisian */ - Fy = "fy", - /** Yucatec Maya */ - Yua = "yua", - /** Zhuang */ - Za = "za", - /** Zulu */ - Zu = "zu", - /** Unknown (All) */ - Unk = "unk", -} - -/** - * Defines values for OcrSkillLanguage. \ - * {@link KnownOcrSkillLanguage} can be used interchangeably with OcrSkillLanguage, - * this enum contains the known values that the service supports. 
- * ### Known values supported by the service - * **af**: Afrikaans \ - * **sq**: Albanian \ - * **anp**: Angika (Devanagiri) \ - * **ar**: Arabic \ - * **ast**: Asturian \ - * **awa**: Awadhi-Hindi (Devanagiri) \ - * **az**: Azerbaijani (Latin) \ - * **bfy**: Bagheli \ - * **eu**: Basque \ - * **be**: Belarusian (Cyrillic and Latin) \ - * **be-cyrl**: Belarusian (Cyrillic) \ - * **be-latn**: Belarusian (Latin) \ - * **bho**: Bhojpuri-Hindi (Devanagiri) \ - * **bi**: Bislama \ - * **brx**: Bodo (Devanagiri) \ - * **bs**: Bosnian Latin \ - * **bra**: Brajbha \ - * **br**: Breton \ - * **bg**: Bulgarian \ - * **bns**: Bundeli \ - * **bua**: Buryat (Cyrillic) \ - * **ca**: Catalan \ - * **ceb**: Cebuano \ - * **rab**: Chamling \ - * **ch**: Chamorro \ - * **hne**: Chhattisgarhi (Devanagiri) \ - * **zh-Hans**: Chinese Simplified \ - * **zh-Hant**: Chinese Traditional \ - * **kw**: Cornish \ - * **co**: Corsican \ - * **crh**: Crimean Tatar (Latin) \ - * **hr**: Croatian \ - * **cs**: Czech \ - * **da**: Danish \ - * **prs**: Dari \ - * **dhi**: Dhimal (Devanagiri) \ - * **doi**: Dogri (Devanagiri) \ - * **nl**: Dutch \ - * **en**: English \ - * **myv**: Erzya (Cyrillic) \ - * **et**: Estonian \ - * **fo**: Faroese \ - * **fj**: Fijian \ - * **fil**: Filipino \ - * **fi**: Finnish \ - * **fr**: French \ - * **fur**: Frulian \ - * **gag**: Gagauz (Latin) \ - * **gl**: Galician \ - * **de**: German \ - * **gil**: Gilbertese \ - * **gon**: Gondi (Devanagiri) \ - * **el**: Greek \ - * **kl**: Greenlandic \ - * **gvr**: Gurung (Devanagiri) \ - * **ht**: Haitian Creole \ - * **hlb**: Halbi (Devanagiri) \ - * **hni**: Hani \ - * **bgc**: Haryanvi \ - * **haw**: Hawaiian \ - * **hi**: Hindi \ - * **mww**: Hmong Daw (Latin) \ - * **hoc**: Ho (Devanagiri) \ - * **hu**: Hungarian \ - * **is**: Icelandic \ - * **smn**: Inari Sami \ - * **id**: Indonesian \ - * **ia**: Interlingua \ - * **iu**: Inuktitut (Latin) \ - * **ga**: Irish \ - * **it**: Italian \ - * **ja**: Japanese \ - * **Jns**: Jaunsari (Devanagiri) \ - * **jv**: Javanese \ - * **kea**: Kabuverdianu \ - * **kac**: Kachin (Latin) \ - * **xnr**: Kangri (Devanagiri) \ - * **krc**: Karachay-Balkar \ - * **kaa-cyrl**: Kara-Kalpak (Cyrillic) \ - * **kaa**: Kara-Kalpak (Latin) \ - * **csb**: Kashubian \ - * **kk-cyrl**: Kazakh (Cyrillic) \ - * **kk-latn**: Kazakh (Latin) \ - * **klr**: Khaling \ - * **kha**: Khasi \ - * **quc**: K'iche' \ - * **ko**: Korean \ - * **kfq**: Korku \ - * **kpy**: Koryak \ - * **kos**: Kosraean \ - * **kum**: Kumyk (Cyrillic) \ - * **ku-arab**: Kurdish (Arabic) \ - * **ku-latn**: Kurdish (Latin) \ - * **kru**: Kurukh (Devanagiri) \ - * **ky**: Kyrgyz (Cyrillic) \ - * **lkt**: Lakota \ - * **la**: Latin \ - * **lt**: Lithuanian \ - * **dsb**: Lower Sorbian \ - * **smj**: Lule Sami \ - * **lb**: Luxembourgish \ - * **bfz**: Mahasu Pahari (Devanagiri) \ - * **ms**: Malay (Latin) \ - * **mt**: Maltese \ - * **kmj**: Malto (Devanagiri) \ - * **gv**: Manx \ - * **mi**: Maori \ - * **mr**: Marathi \ - * **mn**: Mongolian (Cyrillic) \ - * **cnr-cyrl**: Montenegrin (Cyrillic) \ - * **cnr-latn**: Montenegrin (Latin) \ - * **nap**: Neapolitan \ - * **ne**: Nepali \ - * **niu**: Niuean \ - * **nog**: Nogay \ - * **sme**: Northern Sami (Latin) \ - * **nb**: Norwegian \ - * **no**: Norwegian \ - * **oc**: Occitan \ - * **os**: Ossetic \ - * **ps**: Pashto \ - * **fa**: Persian \ - * **pl**: Polish \ - * **pt**: Portuguese \ - * **pa**: Punjabi (Arabic) \ - * **ksh**: Ripuarian \ - * **ro**: Romanian \ - * **rm**: Romansh \ - * **ru**: 
Russian \ - * **sck**: Sadri (Devanagiri) \ - * **sm**: Samoan (Latin) \ - * **sa**: Sanskrit (Devanagiri) \ - * **sat**: Santali (Devanagiri) \ - * **sco**: Scots \ - * **gd**: Scottish Gaelic \ - * **sr**: Serbian (Latin) \ - * **sr-Cyrl**: Serbian (Cyrillic) \ - * **sr-Latn**: Serbian (Latin) \ - * **xsr**: Sherpa (Devanagiri) \ - * **srx**: Sirmauri (Devanagiri) \ - * **sms**: Skolt Sami \ - * **sk**: Slovak \ - * **sl**: Slovenian \ - * **so**: Somali (Arabic) \ - * **sma**: Southern Sami \ - * **es**: Spanish \ - * **sw**: Swahili (Latin) \ - * **sv**: Swedish \ - * **tg**: Tajik (Cyrillic) \ - * **tt**: Tatar (Latin) \ - * **tet**: Tetum \ - * **thf**: Thangmi \ - * **to**: Tongan \ - * **tr**: Turkish \ - * **tk**: Turkmen (Latin) \ - * **tyv**: Tuvan \ - * **hsb**: Upper Sorbian \ - * **ur**: Urdu \ - * **ug**: Uyghur (Arabic) \ - * **uz-arab**: Uzbek (Arabic) \ - * **uz-cyrl**: Uzbek (Cyrillic) \ - * **uz**: Uzbek (Latin) \ - * **vo**: Volapük \ - * **wae**: Walser \ - * **cy**: Welsh \ - * **fy**: Western Frisian \ - * **yua**: Yucatec Maya \ - * **za**: Zhuang \ - * **zu**: Zulu \ - * **unk**: Unknown (All) - */ -export type OcrSkillLanguage = string; - -/** Known values of {@link OcrLineEnding} that the service accepts. */ -export enum KnownOcrLineEnding { - /** Lines are separated by a single space character. */ - Space = "space", - /** Lines are separated by a carriage return ('\r') character. */ - CarriageReturn = "carriageReturn", - /** Lines are separated by a single line feed ('\n') character. */ - LineFeed = "lineFeed", - /** Lines are separated by a carriage return and a line feed ('\r\n') character. */ - CarriageReturnLineFeed = "carriageReturnLineFeed", -} - -/** - * Defines values for OcrLineEnding. \ - * {@link KnownOcrLineEnding} can be used interchangeably with OcrLineEnding, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **space**: Lines are separated by a single space character. \ - * **carriageReturn**: Lines are separated by a carriage return ('\r') character. \ - * **lineFeed**: Lines are separated by a single line feed ('\n') character. \ - * **carriageReturnLineFeed**: Lines are separated by a carriage return and a line feed ('\r\n') character. - */ -export type OcrLineEnding = string; - -/** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. 
*/ -export enum KnownImageAnalysisSkillLanguage { - /** Arabic */ - Ar = "ar", - /** Azerbaijani */ - Az = "az", - /** Bulgarian */ - Bg = "bg", - /** Bosnian Latin */ - Bs = "bs", - /** Catalan */ - Ca = "ca", - /** Czech */ - Cs = "cs", - /** Welsh */ - Cy = "cy", - /** Danish */ - Da = "da", - /** German */ - De = "de", - /** Greek */ - El = "el", - /** English */ - En = "en", - /** Spanish */ - Es = "es", - /** Estonian */ - Et = "et", - /** Basque */ - Eu = "eu", - /** Finnish */ - Fi = "fi", - /** French */ - Fr = "fr", - /** Irish */ - Ga = "ga", - /** Galician */ - Gl = "gl", - /** Hebrew */ - He = "he", - /** Hindi */ - Hi = "hi", - /** Croatian */ - Hr = "hr", - /** Hungarian */ - Hu = "hu", - /** Indonesian */ - Id = "id", - /** Italian */ - It = "it", - /** Japanese */ - Ja = "ja", - /** Kazakh */ - Kk = "kk", - /** Korean */ - Ko = "ko", - /** Lithuanian */ - Lt = "lt", - /** Latvian */ - Lv = "lv", - /** Macedonian */ - Mk = "mk", - /** Malay Malaysia */ - Ms = "ms", - /** Norwegian (Bokmal) */ - Nb = "nb", - /** Dutch */ - Nl = "nl", - /** Polish */ - Pl = "pl", - /** Dari */ - Prs = "prs", - /** Portuguese-Brazil */ - PtBR = "pt-BR", - /** Portuguese-Portugal */ - Pt = "pt", - /** Portuguese-Portugal */ - PtPT = "pt-PT", - /** Romanian */ - Ro = "ro", - /** Russian */ - Ru = "ru", - /** Slovak */ - Sk = "sk", - /** Slovenian */ - Sl = "sl", - /** Serbian - Cyrillic RS */ - SrCyrl = "sr-Cyrl", - /** Serbian - Latin RS */ - SrLatn = "sr-Latn", - /** Swedish */ - Sv = "sv", - /** Thai */ - Th = "th", - /** Turkish */ - Tr = "tr", - /** Ukrainian */ - Uk = "uk", - /** Vietnamese */ - Vi = "vi", - /** Chinese Simplified */ - Zh = "zh", - /** Chinese Simplified */ - ZhHans = "zh-Hans", - /** Chinese Traditional */ - ZhHant = "zh-Hant", -} - -/** - * Defines values for ImageAnalysisSkillLanguage. \ - * {@link KnownImageAnalysisSkillLanguage} can be used interchangeably with ImageAnalysisSkillLanguage, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **ar**: Arabic \ - * **az**: Azerbaijani \ - * **bg**: Bulgarian \ - * **bs**: Bosnian Latin \ - * **ca**: Catalan \ - * **cs**: Czech \ - * **cy**: Welsh \ - * **da**: Danish \ - * **de**: German \ - * **el**: Greek \ - * **en**: English \ - * **es**: Spanish \ - * **et**: Estonian \ - * **eu**: Basque \ - * **fi**: Finnish \ - * **fr**: French \ - * **ga**: Irish \ - * **gl**: Galician \ - * **he**: Hebrew \ - * **hi**: Hindi \ - * **hr**: Croatian \ - * **hu**: Hungarian \ - * **id**: Indonesian \ - * **it**: Italian \ - * **ja**: Japanese \ - * **kk**: Kazakh \ - * **ko**: Korean \ - * **lt**: Lithuanian \ - * **lv**: Latvian \ - * **mk**: Macedonian \ - * **ms**: Malay Malaysia \ - * **nb**: Norwegian (Bokmal) \ - * **nl**: Dutch \ - * **pl**: Polish \ - * **prs**: Dari \ - * **pt-BR**: Portuguese-Brazil \ - * **pt**: Portuguese-Portugal \ - * **pt-PT**: Portuguese-Portugal \ - * **ro**: Romanian \ - * **ru**: Russian \ - * **sk**: Slovak \ - * **sl**: Slovenian \ - * **sr-Cyrl**: Serbian - Cyrillic RS \ - * **sr-Latn**: Serbian - Latin RS \ - * **sv**: Swedish \ - * **th**: Thai \ - * **tr**: Turkish \ - * **uk**: Ukrainian \ - * **vi**: Vietnamese \ - * **zh**: Chinese Simplified \ - * **zh-Hans**: Chinese Simplified \ - * **zh-Hant**: Chinese Traditional - */ -export type ImageAnalysisSkillLanguage = string; - -/** Known values of {@link VisualFeature} that the service accepts. 
*/ -export enum KnownVisualFeature { - /** Visual features recognized as adult persons. */ - Adult = "adult", - /** Visual features recognized as commercial brands. */ - Brands = "brands", - /** Categories. */ - Categories = "categories", - /** Description. */ - Description = "description", - /** Visual features recognized as people faces. */ - Faces = "faces", - /** Visual features recognized as objects. */ - Objects = "objects", - /** Tags. */ - Tags = "tags", -} - -/** - * Defines values for VisualFeature. \ - * {@link KnownVisualFeature} can be used interchangeably with VisualFeature, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **adult**: Visual features recognized as adult persons. \ - * **brands**: Visual features recognized as commercial brands. \ - * **categories**: Categories. \ - * **description**: Description. \ - * **faces**: Visual features recognized as people faces. \ - * **objects**: Visual features recognized as objects. \ - * **tags**: Tags. - */ -export type VisualFeature = string; - -/** Known values of {@link ImageDetail} that the service accepts. */ -export enum KnownImageDetail { - /** Details recognized as celebrities. */ - Celebrities = "celebrities", - /** Details recognized as landmarks. */ - Landmarks = "landmarks", -} - -/** - * Defines values for ImageDetail. \ - * {@link KnownImageDetail} can be used interchangeably with ImageDetail, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **celebrities**: Details recognized as celebrities. \ - * **landmarks**: Details recognized as landmarks. - */ -export type ImageDetail = string; - -/** Known values of {@link EntityCategory} that the service accepts. */ -export enum KnownEntityCategory { - /** Entities describing a physical location. */ - Location = "location", - /** Entities describing an organization. */ - Organization = "organization", - /** Entities describing a person. */ - Person = "person", - /** Entities describing a quantity. */ - Quantity = "quantity", - /** Entities describing a date and time. */ - Datetime = "datetime", - /** Entities describing a URL. */ - Url = "url", - /** Entities describing an email address. */ - Email = "email", -} - -/** - * Defines values for EntityCategory. \ - * {@link KnownEntityCategory} can be used interchangeably with EntityCategory, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **location**: Entities describing a physical location. \ - * **organization**: Entities describing an organization. \ - * **person**: Entities describing a person. \ - * **quantity**: Entities describing a quantity. \ - * **datetime**: Entities describing a date and time. \ - * **url**: Entities describing a URL. \ - * **email**: Entities describing an email address. - */ -export type EntityCategory = string; - -/** Known values of {@link EntityRecognitionSkillLanguage} that the service accepts. 
*/ -export enum KnownEntityRecognitionSkillLanguage { - /** Arabic */ - Ar = "ar", - /** Czech */ - Cs = "cs", - /** Chinese-Simplified */ - ZhHans = "zh-Hans", - /** Chinese-Traditional */ - ZhHant = "zh-Hant", - /** Danish */ - Da = "da", - /** Dutch */ - Nl = "nl", - /** English */ - En = "en", - /** Finnish */ - Fi = "fi", - /** French */ - Fr = "fr", - /** German */ - De = "de", - /** Greek */ - El = "el", - /** Hungarian */ - Hu = "hu", - /** Italian */ - It = "it", - /** Japanese */ - Ja = "ja", - /** Korean */ - Ko = "ko", - /** Norwegian (Bokmaal) */ - No = "no", - /** Polish */ - Pl = "pl", - /** Portuguese (Portugal) */ - PtPT = "pt-PT", - /** Portuguese (Brazil) */ - PtBR = "pt-BR", - /** Russian */ - Ru = "ru", - /** Spanish */ - Es = "es", - /** Swedish */ - Sv = "sv", - /** Turkish */ - Tr = "tr", -} - -/** - * Defines values for EntityRecognitionSkillLanguage. \ - * {@link KnownEntityRecognitionSkillLanguage} can be used interchangeably with EntityRecognitionSkillLanguage, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **ar**: Arabic \ - * **cs**: Czech \ - * **zh-Hans**: Chinese-Simplified \ - * **zh-Hant**: Chinese-Traditional \ - * **da**: Danish \ - * **nl**: Dutch \ - * **en**: English \ - * **fi**: Finnish \ - * **fr**: French \ - * **de**: German \ - * **el**: Greek \ - * **hu**: Hungarian \ - * **it**: Italian \ - * **ja**: Japanese \ - * **ko**: Korean \ - * **no**: Norwegian (Bokmaal) \ - * **pl**: Polish \ - * **pt-PT**: Portuguese (Portugal) \ - * **pt-BR**: Portuguese (Brazil) \ - * **ru**: Russian \ - * **es**: Spanish \ - * **sv**: Swedish \ - * **tr**: Turkish - */ -export type EntityRecognitionSkillLanguage = string; - -/** Known values of {@link SentimentSkillLanguage} that the service accepts. */ -export enum KnownSentimentSkillLanguage { - /** Danish */ - Da = "da", - /** Dutch */ - Nl = "nl", - /** English */ - En = "en", - /** Finnish */ - Fi = "fi", - /** French */ - Fr = "fr", - /** German */ - De = "de", - /** Greek */ - El = "el", - /** Italian */ - It = "it", - /** Norwegian (Bokmaal) */ - No = "no", - /** Polish */ - Pl = "pl", - /** Portuguese (Portugal) */ - PtPT = "pt-PT", - /** Russian */ - Ru = "ru", - /** Spanish */ - Es = "es", - /** Swedish */ - Sv = "sv", - /** Turkish */ - Tr = "tr", -} - -/** - * Defines values for SentimentSkillLanguage. \ - * {@link KnownSentimentSkillLanguage} can be used interchangeably with SentimentSkillLanguage, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **da**: Danish \ - * **nl**: Dutch \ - * **en**: English \ - * **fi**: Finnish \ - * **fr**: French \ - * **de**: German \ - * **el**: Greek \ - * **it**: Italian \ - * **no**: Norwegian (Bokmaal) \ - * **pl**: Polish \ - * **pt-PT**: Portuguese (Portugal) \ - * **ru**: Russian \ - * **es**: Spanish \ - * **sv**: Swedish \ - * **tr**: Turkish - */ -export type SentimentSkillLanguage = string; - -/** Known values of {@link PIIDetectionSkillMaskingMode} that the service accepts. */ -export enum KnownPIIDetectionSkillMaskingMode { - /** No masking occurs and the maskedText output will not be returned. */ - None = "none", - /** Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText. 
*/ - Replace = "replace", -} - -/** - * Defines values for PIIDetectionSkillMaskingMode. \ - * {@link KnownPIIDetectionSkillMaskingMode} can be used interchangeably with PIIDetectionSkillMaskingMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **none**: No masking occurs and the maskedText output will not be returned. \ - * **replace**: Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText. - */ -export type PIIDetectionSkillMaskingMode = string; - -/** Known values of {@link SplitSkillLanguage} that the service accepts. */ -export enum KnownSplitSkillLanguage { - /** Amharic */ - Am = "am", - /** Bosnian */ - Bs = "bs", - /** Czech */ - Cs = "cs", - /** Danish */ - Da = "da", - /** German */ - De = "de", - /** English */ - En = "en", - /** Spanish */ - Es = "es", - /** Estonian */ - Et = "et", - /** Finnish */ - Fi = "fi", - /** French */ - Fr = "fr", - /** Hebrew */ - He = "he", - /** Hindi */ - Hi = "hi", - /** Croatian */ - Hr = "hr", - /** Hungarian */ - Hu = "hu", - /** Indonesian */ - Id = "id", - /** Icelandic */ - Is = "is", - /** Italian */ - It = "it", - /** Japanese */ - Ja = "ja", - /** Korean */ - Ko = "ko", - /** Latvian */ - Lv = "lv", - /** Norwegian */ - Nb = "nb", - /** Dutch */ - Nl = "nl", - /** Polish */ - Pl = "pl", - /** Portuguese (Portugal) */ - Pt = "pt", - /** Portuguese (Brazil) */ - PtBr = "pt-br", - /** Russian */ - Ru = "ru", - /** Slovak */ - Sk = "sk", - /** Slovenian */ - Sl = "sl", - /** Serbian */ - Sr = "sr", - /** Swedish */ - Sv = "sv", - /** Turkish */ - Tr = "tr", - /** Urdu */ - Ur = "ur", - /** Chinese (Simplified) */ - Zh = "zh", -} - -/** - * Defines values for SplitSkillLanguage. \ - * {@link KnownSplitSkillLanguage} can be used interchangeably with SplitSkillLanguage, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **am**: Amharic \ - * **bs**: Bosnian \ - * **cs**: Czech \ - * **da**: Danish \ - * **de**: German \ - * **en**: English \ - * **es**: Spanish \ - * **et**: Estonian \ - * **fi**: Finnish \ - * **fr**: French \ - * **he**: Hebrew \ - * **hi**: Hindi \ - * **hr**: Croatian \ - * **hu**: Hungarian \ - * **id**: Indonesian \ - * **is**: Icelandic \ - * **it**: Italian \ - * **ja**: Japanese \ - * **ko**: Korean \ - * **lv**: Latvian \ - * **nb**: Norwegian \ - * **nl**: Dutch \ - * **pl**: Polish \ - * **pt**: Portuguese (Portugal) \ - * **pt-br**: Portuguese (Brazil) \ - * **ru**: Russian \ - * **sk**: Slovak \ - * **sl**: Slovenian \ - * **sr**: Serbian \ - * **sv**: Swedish \ - * **tr**: Turkish \ - * **ur**: Urdu \ - * **zh**: Chinese (Simplified) - */ -export type SplitSkillLanguage = string; - -/** Known values of {@link TextSplitMode} that the service accepts. */ -export enum KnownTextSplitMode { - /** Split the text into individual pages. */ - Pages = "pages", - /** Split the text into individual sentences. */ - Sentences = "sentences", -} - -/** - * Defines values for TextSplitMode. \ - * {@link KnownTextSplitMode} can be used interchangeably with TextSplitMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **pages**: Split the text into individual pages. 
- * **sentences**: Split the text into individual sentences.
- */
-export type TextSplitMode = string;
-
-/** Known values of {@link SplitSkillUnit} that the service accepts. */
-export enum KnownSplitSkillUnit {
-  /** The length will be measured by character. */
-  Characters = "characters",
-  /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */
-  AzureOpenAITokens = "azureOpenAITokens",
-}
-
-/**
- * Defines values for SplitSkillUnit. \
- * {@link KnownSplitSkillUnit} can be used interchangeably with SplitSkillUnit,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **characters**: The length will be measured by character. \
- * **azureOpenAITokens**: The length will be measured by an AzureOpenAI tokenizer from the tiktoken library.
- */
-export type SplitSkillUnit = string;
-
-/** Known values of {@link SplitSkillEncoderModelName} that the service accepts. */
-export enum KnownSplitSkillEncoderModelName {
-  /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */
-  R50KBase = "r50k_base",
-  /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */
-  P50KBase = "p50k_base",
-  /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */
-  P50KEdit = "p50k_edit",
-  /** A base model with a 100,000 token vocabulary. */
-  CL100KBase = "cl100k_base",
-}
-
-/**
- * Defines values for SplitSkillEncoderModelName. \
- * {@link KnownSplitSkillEncoderModelName} can be used interchangeably with SplitSkillEncoderModelName,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **r50k_base**: Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. \
- * **p50k_base**: A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. \
- * **p50k_edit**: Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. \
- * **cl100k_base**: A base model with a 100,000 token vocabulary.
- */
-export type SplitSkillEncoderModelName = string;
-
-/** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */
-export enum KnownCustomEntityLookupSkillLanguage {
-  /** Danish */
-  Da = "da",
-  /** German */
-  De = "de",
-  /** English */
-  En = "en",
-  /** Spanish */
-  Es = "es",
-  /** Finnish */
-  Fi = "fi",
-  /** French */
-  Fr = "fr",
-  /** Italian */
-  It = "it",
-  /** Korean */
-  Ko = "ko",
-  /** Portuguese */
-  Pt = "pt",
-}
-
-/**
- * Defines values for CustomEntityLookupSkillLanguage. \
- * {@link KnownCustomEntityLookupSkillLanguage} can be used interchangeably with CustomEntityLookupSkillLanguage,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **da**: Danish \
- * **de**: German \
- * **en**: English \
- * **es**: Spanish \
- * **fi**: Finnish \
- * **fr**: French \
- * **it**: Italian \
- * **ko**: Korean \
- * **pt**: Portuguese
- */
-export type CustomEntityLookupSkillLanguage = string;
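The `SplitSkillUnit` and `SplitSkillEncoderModelName` values above configure token-based chunking for the split skill. A hedged sketch of how they might fit together; the real generated `SplitSkill` type is not part of this hunk, so the field names below are assumptions based on the enum documentation:

// Hypothetical shape for illustration only; only the odatatype and the enum
// values come from the deleted code above.
interface SplitSkillSketch {
  odatatype: "#Microsoft.Skills.Text.SplitSkill";
  textSplitMode: string; // e.g. KnownTextSplitMode.Pages
  unit?: string; // e.g. KnownSplitSkillUnit.AzureOpenAITokens
  maximumPageLength?: number;
}

const splitSkill: SplitSkillSketch = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  textSplitMode: "pages",
  unit: "azureOpenAITokens", // measure page length in tokens, not characters
  maximumPageLength: 512,    // interpreted in the chosen unit
};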
-
-/** Known values of {@link TextTranslationSkillLanguage} that the service accepts. */
-export enum KnownTextTranslationSkillLanguage {
-  /** Afrikaans */
-  Af = "af",
-  /** Arabic */
-  Ar = "ar",
-  /** Bangla */
-  Bn = "bn",
-  /** Bosnian (Latin) */
-  Bs = "bs",
-  /** Bulgarian */
-  Bg = "bg",
-  /** Cantonese (Traditional) */
-  Yue = "yue",
-  /** Catalan */
-  Ca = "ca",
-  /** Chinese Simplified */
-  ZhHans = "zh-Hans",
-  /** Chinese Traditional */
-  ZhHant = "zh-Hant",
-  /** Croatian */
-  Hr = "hr",
-  /** Czech */
-  Cs = "cs",
-  /** Danish */
-  Da = "da",
-  /** Dutch */
-  Nl = "nl",
-  /** English */
-  En = "en",
-  /** Estonian */
-  Et = "et",
-  /** Fijian */
-  Fj = "fj",
-  /** Filipino */
-  Fil = "fil",
-  /** Finnish */
-  Fi = "fi",
-  /** French */
-  Fr = "fr",
-  /** German */
-  De = "de",
-  /** Greek */
-  El = "el",
-  /** Haitian Creole */
-  Ht = "ht",
-  /** Hebrew */
-  He = "he",
-  /** Hindi */
-  Hi = "hi",
-  /** Hmong Daw */
-  Mww = "mww",
-  /** Hungarian */
-  Hu = "hu",
-  /** Icelandic */
-  Is = "is",
-  /** Indonesian */
-  Id = "id",
-  /** Italian */
-  It = "it",
-  /** Japanese */
-  Ja = "ja",
-  /** Kiswahili */
-  Sw = "sw",
-  /** Klingon */
-  Tlh = "tlh",
-  /** Klingon (Latin script) */
-  TlhLatn = "tlh-Latn",
-  /** Klingon (Klingon script) */
-  TlhPiqd = "tlh-Piqd",
-  /** Korean */
-  Ko = "ko",
-  /** Latvian */
-  Lv = "lv",
-  /** Lithuanian */
-  Lt = "lt",
-  /** Malagasy */
-  Mg = "mg",
-  /** Malay */
-  Ms = "ms",
-  /** Maltese */
-  Mt = "mt",
-  /** Norwegian */
-  Nb = "nb",
-  /** Persian */
-  Fa = "fa",
-  /** Polish */
-  Pl = "pl",
-  /** Portuguese */
-  Pt = "pt",
-  /** Portuguese (Brazil) */
-  PtBr = "pt-br",
-  /** Portuguese (Portugal) */
-  PtPT = "pt-PT",
-  /** Queretaro Otomi */
-  Otq = "otq",
-  /** Romanian */
-  Ro = "ro",
-  /** Russian */
-  Ru = "ru",
-  /** Samoan */
-  Sm = "sm",
-  /** Serbian (Cyrillic) */
-  SrCyrl = "sr-Cyrl",
-  /** Serbian (Latin) */
-  SrLatn = "sr-Latn",
-  /** Slovak */
-  Sk = "sk",
-  /** Slovenian */
-  Sl = "sl",
-  /** Spanish */
-  Es = "es",
-  /** Swedish */
-  Sv = "sv",
-  /** Tahitian */
-  Ty = "ty",
-  /** Tamil */
-  Ta = "ta",
-  /** Telugu */
-  Te = "te",
-  /** Thai */
-  Th = "th",
-  /** Tongan */
-  To = "to",
-  /** Turkish */
-  Tr = "tr",
-  /** Ukrainian */
-  Uk = "uk",
-  /** Urdu */
-  Ur = "ur",
-  /** Vietnamese */
-  Vi = "vi",
-  /** Welsh */
-  Cy = "cy",
-  /** Yucatec Maya */
-  Yua = "yua",
-  /** Irish */
-  Ga = "ga",
-  /** Kannada */
-  Kn = "kn",
-  /** Maori */
-  Mi = "mi",
-  /** Malayalam */
-  Ml = "ml",
-  /** Punjabi */
-  Pa = "pa",
-}
-
-/**
- * Defines values for TextTranslationSkillLanguage. \
- * {@link KnownTextTranslationSkillLanguage} can be used interchangeably with TextTranslationSkillLanguage,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **af**: Afrikaans \
- * **ar**: Arabic \
- * **bn**: Bangla \
- * **bs**: Bosnian (Latin) \
- * **bg**: Bulgarian \
- * **yue**: Cantonese (Traditional) \
- * **ca**: Catalan \
- * **zh-Hans**: Chinese Simplified \
- * **zh-Hant**: Chinese Traditional \
- * **hr**: Croatian \
- * **cs**: Czech \
- * **da**: Danish \
- * **nl**: Dutch \
- * **en**: English \
- * **et**: Estonian \
- * **fj**: Fijian \
- * **fil**: Filipino \
- * **fi**: Finnish \
- * **fr**: French \
- * **de**: German \
- * **el**: Greek \
- * **ht**: Haitian Creole \
- * **he**: Hebrew \
- * **hi**: Hindi \
- * **mww**: Hmong Daw \
- * **hu**: Hungarian \
- * **is**: Icelandic \
- * **id**: Indonesian \
- * **it**: Italian \
- * **ja**: Japanese \
- * **sw**: Kiswahili \
- * **tlh**: Klingon \
- * **tlh-Latn**: Klingon (Latin script) \
- * **tlh-Piqd**: Klingon (Klingon script) \
- * **ko**: Korean \
- * **lv**: Latvian \
- * **lt**: Lithuanian \
- * **mg**: Malagasy \
- * **ms**: Malay \
- * **mt**: Maltese \
- * **nb**: Norwegian \
- * **fa**: Persian \
- * **pl**: Polish \
- * **pt**: Portuguese \
- * **pt-br**: Portuguese (Brazil) \
- * **pt-PT**: Portuguese (Portugal) \
- * **otq**: Queretaro Otomi \
- * **ro**: Romanian \
- * **ru**: Russian \
- * **sm**: Samoan \
- * **sr-Cyrl**: Serbian (Cyrillic) \
- * **sr-Latn**: Serbian (Latin) \
- * **sk**: Slovak \
- * **sl**: Slovenian \
- * **es**: Spanish \
- * **sv**: Swedish \
- * **ty**: Tahitian \
- * **ta**: Tamil \
- * **te**: Telugu \
- * **th**: Thai \
- * **to**: Tongan \
- * **tr**: Turkish \
- * **uk**: Ukrainian \
- * **ur**: Urdu \
- * **vi**: Vietnamese \
- * **cy**: Welsh \
- * **yua**: Yucatec Maya \
- * **ga**: Irish \
- * **kn**: Kannada \
- * **mi**: Maori \
- * **ml**: Malayalam \
- * **pa**: Punjabi
- */
-export type TextTranslationSkillLanguage = string;
-
-/** Known values of {@link DocumentIntelligenceLayoutSkillOutputFormat} that the service accepts. */
-export enum KnownDocumentIntelligenceLayoutSkillOutputFormat {
-  /** Specify the format of the output as text. */
-  Text = "text",
-  /** Specify the format of the output as markdown. */
-  Markdown = "markdown",
-}
-
-/**
- * Defines values for DocumentIntelligenceLayoutSkillOutputFormat. \
- * {@link KnownDocumentIntelligenceLayoutSkillOutputFormat} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputFormat,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **text**: Specify the format of the output as text. \
- * **markdown**: Specify the format of the output as markdown.
- */
-export type DocumentIntelligenceLayoutSkillOutputFormat = string;
-
-/** Known values of {@link DocumentIntelligenceLayoutSkillOutputMode} that the service accepts. */
-export enum KnownDocumentIntelligenceLayoutSkillOutputMode {
-  /** Specify that the output should be parsed as 'oneToMany'. */
-  OneToMany = "oneToMany",
-}
-
-/**
- * Defines values for DocumentIntelligenceLayoutSkillOutputMode. \
- * {@link KnownDocumentIntelligenceLayoutSkillOutputMode} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputMode,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **oneToMany**: Specify that the output should be parsed as 'oneToMany'.
- */
-export type DocumentIntelligenceLayoutSkillOutputMode = string;
-
-/** Known values of {@link DocumentIntelligenceLayoutSkillMarkdownHeaderDepth} that the service accepts. */
-export enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth {
-  /** Header level 1. */
-  H1 = "h1",
-  /** Header level 2. */
-  H2 = "h2",
-  /** Header level 3. */
-  H3 = "h3",
-  /** Header level 4. */
-  H4 = "h4",
-  /** Header level 5. */
-  H5 = "h5",
-  /** Header level 6. */
-  H6 = "h6",
-}
-
-/**
- * Defines values for DocumentIntelligenceLayoutSkillMarkdownHeaderDepth. \
- * {@link KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth} can be used interchangeably with DocumentIntelligenceLayoutSkillMarkdownHeaderDepth,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **h1**: Header level 1. \
- * **h2**: Header level 2. \
- * **h3**: Header level 3. \
- * **h4**: Header level 4. \
- * **h5**: Header level 5. \
- * **h6**: Header level 6.
- */
-export type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string;
-
-/** Known values of {@link DocumentIntelligenceLayoutSkillExtractionOptions} that the service accepts. */
-export enum KnownDocumentIntelligenceLayoutSkillExtractionOptions {
-  /** Specify that image content should be extracted from the document. */
-  Images = "images",
-  /** Specify that location metadata should be extracted from the document. */
-  LocationMetadata = "locationMetadata",
-}
-
-/**
- * Defines values for DocumentIntelligenceLayoutSkillExtractionOptions. \
- * {@link KnownDocumentIntelligenceLayoutSkillExtractionOptions} can be used interchangeably with DocumentIntelligenceLayoutSkillExtractionOptions,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **images**: Specify that image content should be extracted from the document. \
- * **locationMetadata**: Specify that location metadata should be extracted from the document.
- */
-export type DocumentIntelligenceLayoutSkillExtractionOptions = string;
-
-/** Known values of {@link DocumentIntelligenceLayoutSkillChunkingUnit} that the service accepts. */
-export enum KnownDocumentIntelligenceLayoutSkillChunkingUnit {
-  /** Specifies chunk by characters. */
-  Characters = "characters",
-}
-
-/**
- * Defines values for DocumentIntelligenceLayoutSkillChunkingUnit. \
- * {@link KnownDocumentIntelligenceLayoutSkillChunkingUnit} can be used interchangeably with DocumentIntelligenceLayoutSkillChunkingUnit,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **characters**: Specifies chunk by characters.
- */
-export type DocumentIntelligenceLayoutSkillChunkingUnit = string;
-
-/** Known values of {@link ChatCompletionExtraParametersBehavior} that the service accepts. */
-export enum KnownChatCompletionExtraParametersBehavior {
-  /** Passes any extra parameters directly to the model. */
-  PassThrough = "passThrough",
-  /** Drops all extra parameters. */
-  Drop = "drop",
-  /** Raises an error if any extra parameter is present. */
-  Error = "error",
-}
-
-/**
- * Defines values for ChatCompletionExtraParametersBehavior. \
- * {@link KnownChatCompletionExtraParametersBehavior} can be used interchangeably with ChatCompletionExtraParametersBehavior,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **passThrough**: Passes any extra parameters directly to the model. \
- * **drop**: Drops all extra parameters. \
- * **error**: Raises an error if any extra parameter is present.
- */
-export type ChatCompletionExtraParametersBehavior = string;
-
-/** Known values of {@link ChatCompletionResponseFormatType} that the service accepts. */
-export enum KnownChatCompletionResponseFormatType {
-  /** Text */
-  Text = "text",
-  /** JsonObject */
-  JsonObject = "jsonObject",
-  /** JsonSchema */
-  JsonSchema = "jsonSchema",
-}
-
-/**
- * Defines values for ChatCompletionResponseFormatType. \
- * {@link KnownChatCompletionResponseFormatType} can be used interchangeably with ChatCompletionResponseFormatType,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **text** \
- * **jsonObject** \
- * **jsonSchema**
- */
-export type ChatCompletionResponseFormatType = string;
-
-/** Known values of {@link ContentUnderstandingSkillExtractionOptions} that the service accepts. */
-export enum KnownContentUnderstandingSkillExtractionOptions {
-  /** Specify that image content should be extracted from the document. */
-  Images = "images",
-  /** Specify that location metadata should be extracted from the document. */
-  LocationMetadata = "locationMetadata",
-}
-
-/**
- * Defines values for ContentUnderstandingSkillExtractionOptions. \
- * {@link KnownContentUnderstandingSkillExtractionOptions} can be used interchangeably with ContentUnderstandingSkillExtractionOptions,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **images**: Specify that image content should be extracted from the document. \
- * **locationMetadata**: Specify that location metadata should be extracted from the document.
- */
-export type ContentUnderstandingSkillExtractionOptions = string;
-
-/** Known values of {@link ContentUnderstandingSkillChunkingUnit} that the service accepts. */
-export enum KnownContentUnderstandingSkillChunkingUnit {
-  /** Specifies chunk by characters. */
-  Characters = "characters",
-}
-
-/**
- * Defines values for ContentUnderstandingSkillChunkingUnit. \
- * {@link KnownContentUnderstandingSkillChunkingUnit} can be used interchangeably with ContentUnderstandingSkillChunkingUnit,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **characters**: Specifies chunk by characters.
- */
-export type ContentUnderstandingSkillChunkingUnit = string;
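The `ChatCompletionResponseFormatType` values above select how a chat-completion skill asks the model to shape its output. A standalone sketch; the wrapper shape is invented for illustration, since the generated response-format model is not part of this hunk:

// Local sketch only; the field layout is an assumption built on the enum
// values above, not the SDK's actual generated model.
type ChatCompletionResponseFormatTypeSketch = "text" | "jsonObject" | "jsonSchema";

interface ResponseFormatSketch {
  type: ChatCompletionResponseFormatTypeSketch;
  jsonSchema?: Record<string, unknown>; // only meaningful for "jsonSchema"
}

const format: ResponseFormatSketch = {
  type: "jsonSchema",
  jsonSchema: {
    name: "summary",
    schema: { type: "object", properties: { title: { type: "string" } } },
  },
};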
-
-/** Known values of {@link LexicalTokenizerName} that the service accepts. */
-export enum KnownLexicalTokenizerName {
-  /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
-  Classic = "classic",
-  /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
-  EdgeNGram = "edgeNGram",
-  /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
-  Keyword = "keyword_v2",
-  /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
-  Letter = "letter",
-  /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
-  Lowercase = "lowercase",
-  /** Divides text using language-specific rules. */
-  MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
-  /** Divides text using language-specific rules and reduces words to their base forms. */
-  MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
-  /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
-  NGram = "nGram",
-  /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
-  PathHierarchy = "path_hierarchy_v2",
-  /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
-  Pattern = "pattern",
-  /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
-  Standard = "standard_v2",
-  /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
-  UaxUrlEmail = "uax_url_email",
-  /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
-  Whitespace = "whitespace",
-}
-
-/**
- * Defines values for LexicalTokenizerName. \
- * {@link KnownLexicalTokenizerName} can be used interchangeably with LexicalTokenizerName,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **classic**: Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html \
- * **edgeNGram**: Tokenizes the input from an edge into n-grams of the given size(s). See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html \
- * **keyword_v2**: Emits the entire input as a single token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html \
- * **letter**: Divides text at non-letters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html \
- * **lowercase**: Divides text at non-letters and converts them to lower case. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html \
- * **microsoft_language_tokenizer**: Divides text using language-specific rules. \
- * **microsoft_language_stemming_tokenizer**: Divides text using language-specific rules and reduces words to their base forms. \
- * **nGram**: Tokenizes the input into n-grams of the given size(s). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html \
- * **path_hierarchy_v2**: Tokenizer for path-like hierarchies. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html \
- * **pattern**: Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html \
- * **standard_v2**: Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html \
- * **uax_url_email**: Tokenizes urls and emails as one token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html \
- * **whitespace**: Divides text at whitespace. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html
- */
-export type LexicalTokenizerName = string;
-
-/** Known values of {@link RegexFlags} that the service accepts. */
-export enum KnownRegexFlags {
-  /** Enables canonical equivalence. */
-  CanonEq = "CANON_EQ",
-  /** Enables case-insensitive matching. */
-  CaseInsensitive = "CASE_INSENSITIVE",
-  /** Permits whitespace and comments in the pattern. */
-  Comments = "COMMENTS",
-  /** Enables dotall mode. */
-  DotAll = "DOTALL",
-  /** Enables literal parsing of the pattern. */
-  Literal = "LITERAL",
-  /** Enables multiline mode. */
-  Multiline = "MULTILINE",
-  /** Enables Unicode-aware case folding. */
-  UnicodeCase = "UNICODE_CASE",
-  /** Enables Unix lines mode. */
-  UnixLines = "UNIX_LINES",
-}
-
-/**
- * Defines values for RegexFlags. \
- * {@link KnownRegexFlags} can be used interchangeably with RegexFlags,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **CANON_EQ**: Enables canonical equivalence. \
- * **CASE_INSENSITIVE**: Enables case-insensitive matching. \
- * **COMMENTS**: Permits whitespace and comments in the pattern. \
- * **DOTALL**: Enables dotall mode. \
- * **LITERAL**: Enables literal parsing of the pattern. \
- * **MULTILINE**: Enables multiline mode. \
- * **UNICODE_CASE**: Enables Unicode-aware case folding. \
- * **UNIX_LINES**: Enables Unix lines mode. 
- */
-export type RegexFlags = string;
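The `RegexFlags` values correspond to Java `Pattern` flags consumed by the pattern tokenizer and analyzer. A sketch of a tokenizer definition using them; combining flags with `|` follows the REST API convention, and the object shape here is an assumption, not a guarantee of this SDK's surface:

// Illustrative object only; odatatype and the flag names come from the code
// above, everything else is assumed.
const patternTokenizer = {
  odatatype: "#Microsoft.Azure.Search.PatternTokenizer",
  name: "my-pattern-tokenizer",
  pattern: "\\W+",
  flags: "CASE_INSENSITIVE|MULTILINE", // KnownRegexFlags values joined by "|"
};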
-/** Defines values for IndexerStatus. */
-export type IndexerStatus = "unknown" | "error" | "running";
-/** Defines values for IndexerExecutionStatus. */
-export type IndexerExecutionStatus =
-  | "transientFailure"
-  | "success"
-  | "inProgress"
-  | "reset";
-/** Defines values for ScoringFunctionInterpolation. */
-export type ScoringFunctionInterpolation =
-  | "linear"
-  | "constant"
-  | "quadratic"
-  | "logarithmic";
-/** Defines values for ScoringFunctionAggregation. */
-export type ScoringFunctionAggregation =
-  | "sum"
-  | "average"
-  | "minimum"
-  | "maximum"
-  | "firstMatching"
-  | "product";
-/** Defines values for TokenCharacterKind. */
-export type TokenCharacterKind =
-  | "letter"
-  | "digit"
-  | "whitespace"
-  | "punctuation"
-  | "symbol";
-/** Defines values for MicrosoftTokenizerLanguage. */
-export type MicrosoftTokenizerLanguage =
-  | "bangla"
-  | "bulgarian"
-  | "catalan"
-  | "chineseSimplified"
-  | "chineseTraditional"
-  | "croatian"
-  | "czech"
-  | "danish"
-  | "dutch"
-  | "english"
-  | "french"
-  | "german"
-  | "greek"
-  | "gujarati"
-  | "hindi"
-  | "icelandic"
-  | "indonesian"
-  | "italian"
-  | "japanese"
-  | "kannada"
-  | "korean"
-  | "malay"
-  | "malayalam"
-  | "marathi"
-  | "norwegianBokmaal"
-  | "polish"
-  | "portuguese"
-  | "portugueseBrazilian"
-  | "punjabi"
-  | "romanian"
-  | "russian"
-  | "serbianCyrillic"
-  | "serbianLatin"
-  | "slovenian"
-  | "spanish"
-  | "swedish"
-  | "tamil"
-  | "telugu"
-  | "thai"
-  | "ukrainian"
-  | "urdu"
-  | "vietnamese";
-/** Defines values for MicrosoftStemmingTokenizerLanguage. */
-export type MicrosoftStemmingTokenizerLanguage =
-  | "arabic"
-  | "bangla"
-  | "bulgarian"
-  | "catalan"
-  | "croatian"
-  | "czech"
-  | "danish"
-  | "dutch"
-  | "english"
-  | "estonian"
-  | "finnish"
-  | "french"
-  | "german"
-  | "greek"
-  | "gujarati"
-  | "hebrew"
-  | "hindi"
-  | "hungarian"
-  | "icelandic"
-  | "indonesian"
-  | "italian"
-  | "kannada"
-  | "latvian"
-  | "lithuanian"
-  | "malay"
-  | "malayalam"
-  | "marathi"
-  | "norwegianBokmaal"
-  | "polish"
-  | "portuguese"
-  | "portugueseBrazilian"
-  | "punjabi"
-  | "romanian"
-  | "russian"
-  | "serbianCyrillic"
-  | "serbianLatin"
-  | "slovak"
-  | "slovenian"
-  | "spanish"
-  | "swedish"
-  | "tamil"
-  | "telugu"
-  | "turkish"
-  | "ukrainian"
-  | "urdu";
-/** Defines values for CjkBigramTokenFilterScripts. */
-export type CjkBigramTokenFilterScripts =
-  | "han"
-  | "hiragana"
-  | "katakana"
-  | "hangul";
-/** Defines values for EdgeNGramTokenFilterSide. */
-export type EdgeNGramTokenFilterSide = "front" | "back";
-/** Defines values for PhoneticEncoder. */
-export type PhoneticEncoder =
-  | "metaphone"
-  | "doubleMetaphone"
-  | "soundex"
-  | "refinedSoundex"
-  | "caverphone1"
-  | "caverphone2"
-  | "cologne"
-  | "nysiis"
-  | "koelnerPhonetik"
-  | "haasePhonetik"
-  | "beiderMorse";
-/** Defines values for SnowballTokenFilterLanguage. */
-export type SnowballTokenFilterLanguage =
-  | "armenian"
-  | "basque"
-  | "catalan"
-  | "danish"
-  | "dutch"
-  | "english"
-  | "finnish"
-  | "french"
-  | "german"
-  | "german2"
-  | "hungarian"
-  | "italian"
-  | "kp"
-  | "lovins"
-  | "norwegian"
-  | "porter"
-  | "portuguese"
-  | "romanian"
-  | "russian"
-  | "spanish"
-  | "swedish"
-  | "turkish";
-/** Defines values for StemmerTokenFilterLanguage. */
-export type StemmerTokenFilterLanguage =
-  | "arabic"
-  | "armenian"
-  | "basque"
-  | "brazilian"
-  | "bulgarian"
-  | "catalan"
-  | "czech"
-  | "danish"
-  | "dutch"
-  | "dutchKp"
-  | "english"
-  | "lightEnglish"
-  | "minimalEnglish"
-  | "possessiveEnglish"
-  | "porter2"
-  | "lovins"
-  | "finnish"
-  | "lightFinnish"
-  | "french"
-  | "lightFrench"
-  | "minimalFrench"
-  | "galician"
-  | "minimalGalician"
-  | "german"
-  | "german2"
-  | "lightGerman"
-  | "minimalGerman"
-  | "greek"
-  | "hindi"
-  | "hungarian"
-  | "lightHungarian"
-  | "indonesian"
-  | "irish"
-  | "italian"
-  | "lightItalian"
-  | "sorani"
-  | "latvian"
-  | "norwegian"
-  | "lightNorwegian"
-  | "minimalNorwegian"
-  | "lightNynorsk"
-  | "minimalNynorsk"
-  | "portuguese"
-  | "lightPortuguese"
-  | "minimalPortuguese"
-  | "portugueseRslp"
-  | "romanian"
-  | "russian"
-  | "lightRussian"
-  | "spanish"
-  | "lightSpanish"
-  | "swedish"
-  | "lightSwedish"
-  | "turkish";
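Unlike the extensible `Known*` enums, these string-literal unions are closed: the compiler can check exhaustiveness and will reject values outside the list. A standalone sketch of the difference:

// Closed union: adding a new case without handling it becomes a compile error.
type IndexerExecutionStatusSketch =
  | "transientFailure"
  | "success"
  | "inProgress"
  | "reset";

function isTerminal(status: IndexerExecutionStatusSketch): boolean {
  switch (status) {
    case "success":
    case "transientFailure":
      return true;
    case "inProgress":
    case "reset":
      return false;
  }
}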
-/** Defines values for StopwordsList. */
-export type StopwordsList =
-  | "arabic"
-  | "armenian"
-  | "basque"
-  | "brazilian"
-  | "bulgarian"
-  | "catalan"
-  | "czech"
-  | "danish"
-  | "dutch"
-  | "english"
-  | "finnish"
-  | "french"
-  | "galician"
-  | "german"
-  | "greek"
-  | "hindi"
-  | "hungarian"
-  | "indonesian"
-  | "irish"
-  | "italian"
-  | "latvian"
-  | "norwegian"
-  | "persian"
-  | "portuguese"
-  | "romanian"
-  | "russian"
-  | "sorani"
-  | "spanish"
-  | "swedish"
-  | "thai"
-  | "turkish";
-
-/** Optional parameters. */
-export interface KnowledgeBasesCreateOrUpdateOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Contains response data for the createOrUpdate operation. */
-export type KnowledgeBasesCreateOrUpdateResponse = KnowledgeBase;
-
-/** Optional parameters. */
-export interface KnowledgeBasesDeleteOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Optional parameters. */
-export interface KnowledgeBasesGetOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the get operation. */
-export type KnowledgeBasesGetResponse = KnowledgeBase;
-
-/** Optional parameters. */
-export interface KnowledgeBasesListOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the list operation. */
-export type KnowledgeBasesListResponse = ListKnowledgeBasesResult;
-
-/** Optional parameters. */
-export interface KnowledgeBasesCreateOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the create operation. */
-export type KnowledgeBasesCreateResponse = KnowledgeBase;
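The `ifMatch`/`ifNoneMatch` options that recur through these optional-parameter interfaces implement optimistic concurrency against the resource's ETag. A hedged sketch of the flow; the client shape below is a placeholder, not the SDK's public API:

// Placeholder client shape for illustration; only the ifMatch option name
// comes from the deleted declarations above.
declare const client: {
  getKnowledgeBase(name: string): Promise<{ name: string; etag?: string }>;
  createOrUpdateKnowledgeBase(
    kb: { name: string },
    options?: { ifMatch?: string },
  ): Promise<unknown>;
};

async function safeUpdate(name: string): Promise<void> {
  const current = await client.getKnowledgeBase(name);
  // The service rejects the write (412 Precondition Failed) if another
  // writer changed the resource since we read it.
  await client.createOrUpdateKnowledgeBase(current, { ifMatch: current.etag });
}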
-
-/** Optional parameters. */
-export interface KnowledgeSourcesCreateOrUpdateOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Contains response data for the createOrUpdate operation. */
-export type KnowledgeSourcesCreateOrUpdateResponse = KnowledgeSourceUnion;
-
-/** Optional parameters. */
-export interface KnowledgeSourcesDeleteOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Optional parameters. */
-export interface KnowledgeSourcesGetOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the get operation. */
-export type KnowledgeSourcesGetResponse = KnowledgeSourceUnion;
-
-/** Optional parameters. */
-export interface KnowledgeSourcesListOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the list operation. */
-export type KnowledgeSourcesListResponse = ListKnowledgeSourcesResult;
-
-/** Optional parameters. */
-export interface KnowledgeSourcesCreateOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the create operation. */
-export type KnowledgeSourcesCreateResponse = KnowledgeSourceUnion;
-
-/** Optional parameters. */
-export interface KnowledgeSourcesGetStatusOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the getStatus operation. */
-export type KnowledgeSourcesGetStatusResponse = KnowledgeSourceStatus;
-
-/** Optional parameters. */
-export interface DataSourcesCreateOrUpdateOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-  /** Ignores cache reset requirements. */
-  skipIndexerResetRequirementForCache?: boolean;
-}
-
-/** Contains response data for the createOrUpdate operation. */
-export type DataSourcesCreateOrUpdateResponse = SearchIndexerDataSource;
-
-/** Optional parameters. */
-export interface DataSourcesDeleteOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Optional parameters. */
-export interface DataSourcesGetOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the get operation. */
-export type DataSourcesGetResponse = SearchIndexerDataSource;
-
-/** Optional parameters. */
-export interface DataSourcesListOptionalParams
-  extends coreClient.OperationOptions {
-  /** Selects which top-level properties of the data sources to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
-  select?: string;
-}
-
-/** Contains response data for the list operation. */
-export type DataSourcesListResponse = ListDataSourcesResult;
-
-/** Optional parameters. */
-export interface DataSourcesCreateOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the create operation. */
-export type DataSourcesCreateResponse = SearchIndexerDataSource;
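The `select` option on the list operations trims each returned definition to the named top-level properties. A small sketch under the same placeholder-client caveat as above; only the option name and its comma-separated/`'*'` syntax come from the declarations:

// Placeholder shape; illustrates the select syntax only.
declare const dataSources: {
  list(options?: { select?: string }): Promise<unknown>;
};

async function listNamesAndTypes(): Promise<unknown> {
  return dataSources.list({ select: "name,type" }); // or "*" for everything
}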
-
-/** Optional parameters. */
-export interface IndexersResetOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Optional parameters. */
-export interface IndexersResetDocsOptionalParams
-  extends coreClient.OperationOptions {
-  keysOrIds?: DocumentKeysOrIds;
-  /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */
-  overwrite?: boolean;
-}
-
-/** Optional parameters. */
-export interface IndexersResyncOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Optional parameters. */
-export interface IndexersRunOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Optional parameters. */
-export interface IndexersCreateOrUpdateOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-  /** Ignores cache reset requirements. */
-  skipIndexerResetRequirementForCache?: boolean;
-  /** Disables cache reprocessing change detection. */
-  disableCacheReprocessingChangeDetection?: boolean;
-}
-
-/** Contains response data for the createOrUpdate operation. */
-export type IndexersCreateOrUpdateResponse = SearchIndexer;
-
-/** Optional parameters. */
-export interface IndexersDeleteOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Optional parameters. */
-export interface IndexersGetOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the get operation. */
-export type IndexersGetResponse = SearchIndexer;
-
-/** Optional parameters. */
-export interface IndexersListOptionalParams
-  extends coreClient.OperationOptions {
-  /** Selects which top-level properties of the indexers to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
-  select?: string;
-}
-
-/** Contains response data for the list operation. */
-export type IndexersListResponse = ListIndexersResult;
-
-/** Optional parameters. */
-export interface IndexersCreateOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the create operation. */
-export type IndexersCreateResponse = SearchIndexer;
-
-/** Optional parameters. */
-export interface IndexersGetStatusOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the getStatus operation. */
-export type IndexersGetStatusResponse = SearchIndexerStatus;
-
-/** Optional parameters. */
-export interface SkillsetsCreateOrUpdateOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-  /** Ignores cache reset requirements. */
-  skipIndexerResetRequirementForCache?: boolean;
-  /** Disables cache reprocessing change detection. */
-  disableCacheReprocessingChangeDetection?: boolean;
-}
-
-/** Contains response data for the createOrUpdate operation. */
-export type SkillsetsCreateOrUpdateResponse = SearchIndexerSkillset;
-
-/** Optional parameters. */
-export interface SkillsetsDeleteOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Optional parameters. */
-export interface SkillsetsGetOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the get operation. */
-export type SkillsetsGetResponse = SearchIndexerSkillset;
-
-/** Optional parameters. */
-export interface SkillsetsListOptionalParams
-  extends coreClient.OperationOptions {
-  /** Selects which top-level properties of the skillsets to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
-  select?: string;
-}
-
-/** Contains response data for the list operation. */
-export type SkillsetsListResponse = ListSkillsetsResult;
-
-/** Optional parameters. */
-export interface SkillsetsCreateOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the create operation. */
-export type SkillsetsCreateResponse = SearchIndexerSkillset;
-
-/** Optional parameters. */
-export interface SkillsetsResetSkillsOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Optional parameters. */
-export interface SynonymMapsCreateOrUpdateOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Contains response data for the createOrUpdate operation. */
-export type SynonymMapsCreateOrUpdateResponse = SynonymMap;
-
-/** Optional parameters. */
-export interface SynonymMapsDeleteOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Optional parameters. */
-export interface SynonymMapsGetOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the get operation. */
-export type SynonymMapsGetResponse = SynonymMap;
-
-/** Optional parameters. */
-export interface SynonymMapsListOptionalParams
-  extends coreClient.OperationOptions {
-  /** Selects which top-level properties of the synonym maps to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
-  select?: string;
-}
-
-/** Contains response data for the list operation. */
-export type SynonymMapsListResponse = ListSynonymMapsResult;
-
-/** Optional parameters. */
-export interface SynonymMapsCreateOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the create operation. */
-export type SynonymMapsCreateResponse = SynonymMap;
-
-/** Optional parameters. */
-export interface IndexesCreateOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the create operation. */
-export type IndexesCreateResponse = SearchIndex;
-
-/** Optional parameters. */
-export interface IndexesListOptionalParams extends coreClient.OperationOptions {
-  /** Selects which top-level properties of the index definitions to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
-  select?: string;
-}
-
-/** Contains response data for the list operation. */
-export type IndexesListResponse = ListIndexesResult;
-
-/** Optional parameters. */
-export interface IndexesCreateOrUpdateOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-  /** Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. */
-  allowIndexDowntime?: boolean;
-}
-
-/** Contains response data for the createOrUpdate operation. */
-export type IndexesCreateOrUpdateResponse = SearchIndex;
-
-/** Optional parameters. */
-export interface IndexesDeleteOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Optional parameters. */
-export interface IndexesGetOptionalParams extends coreClient.OperationOptions {}
-
-/** Contains response data for the get operation. */
-export type IndexesGetResponse = SearchIndex;
-
-/** Optional parameters. */
-export interface IndexesGetStatisticsOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the getStatistics operation. */
-export type IndexesGetStatisticsResponse = GetIndexStatisticsResult;
-
-/** Optional parameters. */
-export interface IndexesAnalyzeOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the analyze operation. */
-export type IndexesAnalyzeResponse = AnalyzeResult;
-
-/** Optional parameters. */
-export interface AliasesCreateOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the create operation. */
-export type AliasesCreateResponse = SearchAlias;
-
-/** Optional parameters. */
-export interface AliasesListOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the list operation. */
-export type AliasesListResponse = ListAliasesResult;
-
-/** Optional parameters. */
-export interface AliasesCreateOrUpdateOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Contains response data for the createOrUpdate operation. */
-export type AliasesCreateOrUpdateResponse = SearchAlias;
-
-/** Optional parameters. */
-export interface AliasesDeleteOptionalParams
-  extends coreClient.OperationOptions {
-  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
-  ifMatch?: string;
-  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
-  ifNoneMatch?: string;
-}
-
-/** Optional parameters. */
-export interface AliasesGetOptionalParams extends coreClient.OperationOptions {}
-
-/** Contains response data for the get operation. */
-export type AliasesGetResponse = SearchAlias;
-
-/** Optional parameters. */
-export interface GetServiceStatisticsOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the getServiceStatistics operation. */
-export type GetServiceStatisticsResponse = ServiceStatistics;
-
-/** Optional parameters. */
-export interface GetIndexStatsSummaryOptionalParams
-  extends coreClient.OperationOptions {}
-
-/** Contains response data for the getIndexStatsSummary operation. */
-export type GetIndexStatsSummaryResponse = ListIndexStatsSummary;
-
-/** Optional parameters. */
-export interface SearchServiceClientOptionalParams
-  extends coreHttpCompat.ExtendedServiceClientOptions {
-  /** Overrides client endpoint. */
-  endpoint?: string;
-}
diff --git a/sdk/search/search-documents/src/generated/service/models/mappers.ts b/sdk/search/search-documents/src/generated/service/models/mappers.ts
deleted file mode 100644
index 3c75826e1219..000000000000
--- a/sdk/search/search-documents/src/generated/service/models/mappers.ts
+++ /dev/null
@@ -1,8497 +0,0 @@
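The deleted mappers.ts carried the runtime serialization metadata that `@azure/core-client` uses to convert between client property names and wire names (including escaped OData keys such as `@odata\\.etag`). A minimal sketch of the mechanism; the `Widget` model is invented for illustration, and `createSerializer` usage here is an assumption about the `@azure/core-client` surface rather than code from this PR:

import * as coreClient from "@azure/core-client";

// Describe how client properties map to wire names so the generic
// serializer can convert in both directions.
const WidgetMapper: coreClient.CompositeMapper = {
  type: {
    name: "Composite",
    className: "Widget",
    modelProperties: {
      name: { serializedName: "name", required: true, type: { name: "String" } },
      // "@odata\\." escapes the dot so it is not parsed as a path separator.
      etag: { serializedName: "@odata\\.etag", type: { name: "String" } },
    },
  },
};

const serializer = coreClient.createSerializer({ Widget: WidgetMapper });
const wire = serializer.serialize(WidgetMapper, { name: "w1", etag: "abc" });
// wire: { name: "w1", "@odata.etag": "abc" }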
"kind", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const KnowledgeRetrievalReasoningEffort: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeRetrievalReasoningEffort", - uberParent: "KnowledgeRetrievalReasoningEffort", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchResourceEncryptionKey: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchResourceEncryptionKey", - modelProperties: { - keyName: { - serializedName: "keyVaultKeyName", - required: true, - type: { - name: "String", - }, - }, - keyVersion: { - serializedName: "keyVaultKeyVersion", - type: { - name: "String", - }, - }, - vaultUri: { - serializedName: "keyVaultUri", - required: true, - type: { - name: "String", - }, - }, - accessCredentials: { - serializedName: "accessCredentials", - type: { - name: "Composite", - className: "AzureActiveDirectoryApplicationCredentials", - }, - }, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - }, - }, -}; - -export const AzureActiveDirectoryApplicationCredentials: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "AzureActiveDirectoryApplicationCredentials", - modelProperties: { - applicationId: { - serializedName: "applicationId", - required: true, - type: { - name: "String", - }, - }, - applicationSecret: { - serializedName: "applicationSecret", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const SearchIndexerDataIdentity: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - uberParent: "SearchIndexerDataIdentity", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ErrorResponse: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ErrorResponse", - modelProperties: { - error: { - serializedName: "error", - type: { - name: "Composite", - className: "ErrorDetail", - }, - }, - }, - }, -}; - -export const ErrorDetail: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ErrorDetail", - modelProperties: { - code: { - serializedName: "code", - readOnly: true, - type: { - name: "String", - }, - }, - message: { - serializedName: "message", - readOnly: true, - type: { - name: "String", - }, - }, - target: { - serializedName: "target", - readOnly: true, - type: { - name: "String", - }, - }, - details: { - serializedName: "details", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "ErrorDetail", - }, - }, - }, - }, - additionalInfo: { - serializedName: "additionalInfo", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "ErrorAdditionalInfo", - }, - }, - }, - }, - }, - }, -}; - -export const ErrorAdditionalInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ErrorAdditionalInfo", - modelProperties: { - type: { - serializedName: "type", - readOnly: true, - type: { - name: "String", - }, - }, - info: { - serializedName: "info", - readOnly: true, - type: { - name: 
"Dictionary", - value: { type: { name: "any" } }, - }, - }, - }, - }, -}; - -export const ListKnowledgeBasesResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListKnowledgeBasesResult", - modelProperties: { - knowledgeBases: { - serializedName: "value", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeBase", - }, - }, - }, - }, - }, - }, -}; - -export const KnowledgeSource: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeSource", - uberParent: "KnowledgeSource", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - description: { - serializedName: "description", - type: { - name: "String", - }, - }, - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String", - }, - }, - encryptionKey: { - serializedName: "encryptionKey", - type: { - name: "Composite", - className: "SearchResourceEncryptionKey", - }, - }, - }, - }, -}; - -export const ListKnowledgeSourcesResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListKnowledgeSourcesResult", - modelProperties: { - knowledgeSources: { - serializedName: "value", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "KnowledgeSource", - }, - }, - }, - }, - }, - }, -}; - -export const KnowledgeSourceStatus: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeSourceStatus", - modelProperties: { - synchronizationStatus: { - serializedName: "synchronizationStatus", - required: true, - type: { - name: "String", - }, - }, - synchronizationInterval: { - serializedName: "synchronizationInterval", - nullable: true, - type: { - name: "String", - }, - }, - currentSynchronizationState: { - serializedName: "currentSynchronizationState", - type: { - name: "Composite", - className: "SynchronizationState", - }, - }, - lastSynchronizationState: { - serializedName: "lastSynchronizationState", - type: { - name: "Composite", - className: "CompletedSynchronizationState", - }, - }, - statistics: { - serializedName: "statistics", - type: { - name: "Composite", - className: "KnowledgeSourceStatistics", - }, - }, - }, - }, -}; - -export const SynchronizationState: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SynchronizationState", - modelProperties: { - startTime: { - serializedName: "startTime", - required: true, - type: { - name: "DateTime", - }, - }, - itemsUpdatesProcessed: { - serializedName: "itemsUpdatesProcessed", - required: true, - type: { - name: "Number", - }, - }, - itemsUpdatesFailed: { - serializedName: "itemsUpdatesFailed", - required: true, - type: { - name: "Number", - }, - }, - itemsSkipped: { - serializedName: "itemsSkipped", - required: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const CompletedSynchronizationState: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "CompletedSynchronizationState", - modelProperties: { - startTime: { - serializedName: "startTime", - required: true, - type: { - name: "DateTime", - }, - }, - endTime: { - serializedName: "endTime", - required: true, - type: { - name: "DateTime", - }, - }, - itemsUpdatesProcessed: { - serializedName: 
"itemsUpdatesProcessed", - required: true, - type: { - name: "Number", - }, - }, - itemsUpdatesFailed: { - serializedName: "itemsUpdatesFailed", - required: true, - type: { - name: "Number", - }, - }, - itemsSkipped: { - serializedName: "itemsSkipped", - required: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const KnowledgeSourceStatistics: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeSourceStatistics", - modelProperties: { - totalSynchronization: { - serializedName: "totalSynchronization", - required: true, - type: { - name: "Number", - }, - }, - averageSynchronizationDuration: { - serializedName: "averageSynchronizationDuration", - required: true, - type: { - name: "String", - }, - }, - averageItemsProcessedPerSynchronization: { - serializedName: "averageItemsProcessedPerSynchronization", - required: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const SearchIndexerDataSource: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerDataSource", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - description: { - serializedName: "description", - type: { - name: "String", - }, - }, - type: { - serializedName: "type", - required: true, - type: { - name: "String", - }, - }, - subType: { - serializedName: "subType", - readOnly: true, - type: { - name: "String", - }, - }, - credentials: { - serializedName: "credentials", - type: { - name: "Composite", - className: "DataSourceCredentials", - }, - }, - container: { - serializedName: "container", - type: { - name: "Composite", - className: "SearchIndexerDataContainer", - }, - }, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - indexerPermissionOptions: { - serializedName: "indexerPermissionOptions", - nullable: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - dataChangeDetectionPolicy: { - serializedName: "dataChangeDetectionPolicy", - type: { - name: "Composite", - className: "DataChangeDetectionPolicy", - }, - }, - dataDeletionDetectionPolicy: { - serializedName: "dataDeletionDetectionPolicy", - type: { - name: "Composite", - className: "DataDeletionDetectionPolicy", - }, - }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String", - }, - }, - encryptionKey: { - serializedName: "encryptionKey", - type: { - name: "Composite", - className: "SearchResourceEncryptionKey", - }, - }, - }, - }, -}; - -export const DataSourceCredentials: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DataSourceCredentials", - modelProperties: { - connectionString: { - serializedName: "connectionString", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchIndexerDataContainer: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerDataContainer", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - query: { - serializedName: "query", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const DataChangeDetectionPolicy: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DataChangeDetectionPolicy", - uberParent: "DataChangeDetectionPolicy", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - 
serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const DataDeletionDetectionPolicy: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DataDeletionDetectionPolicy", - uberParent: "DataDeletionDetectionPolicy", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ListDataSourcesResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListDataSourcesResult", - modelProperties: { - dataSources: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerDataSource", - }, - }, - }, - }, - }, - }, -}; - -export const DocumentKeysOrIds: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DocumentKeysOrIds", - modelProperties: { - documentKeys: { - serializedName: "documentKeys", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - datasourceDocumentIds: { - serializedName: "datasourceDocumentIds", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const IndexerResyncBody: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexerResyncBody", - modelProperties: { - options: { - serializedName: "options", - nullable: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const SearchIndexer: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexer", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - description: { - serializedName: "description", - type: { - name: "String", - }, - }, - dataSourceName: { - serializedName: "dataSourceName", - required: true, - type: { - name: "String", - }, - }, - skillsetName: { - serializedName: "skillsetName", - type: { - name: "String", - }, - }, - targetIndexName: { - serializedName: "targetIndexName", - required: true, - type: { - name: "String", - }, - }, - schedule: { - serializedName: "schedule", - type: { - name: "Composite", - className: "IndexingSchedule", - }, - }, - parameters: { - serializedName: "parameters", - type: { - name: "Composite", - className: "IndexingParameters", - }, - }, - fieldMappings: { - serializedName: "fieldMappings", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "FieldMapping", - }, - }, - }, - }, - outputFieldMappings: { - serializedName: "outputFieldMappings", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "FieldMapping", - }, - }, - }, - }, - isDisabled: { - defaultValue: false, - serializedName: "disabled", - nullable: true, - type: { - name: "Boolean", - }, - }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String", - }, - }, - encryptionKey: { - serializedName: "encryptionKey", - type: { - name: "Composite", - className: "SearchResourceEncryptionKey", - }, - }, - cache: { - serializedName: "cache", - type: { - name: "Composite", - className: "SearchIndexerCache", - }, - }, - }, - }, -}; - -export const IndexingSchedule: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: 
"IndexingSchedule", - modelProperties: { - interval: { - serializedName: "interval", - required: true, - type: { - name: "TimeSpan", - }, - }, - startTime: { - serializedName: "startTime", - type: { - name: "DateTime", - }, - }, - }, - }, -}; - -export const IndexingParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexingParameters", - modelProperties: { - batchSize: { - serializedName: "batchSize", - nullable: true, - type: { - name: "Number", - }, - }, - maxFailedItems: { - defaultValue: 0, - serializedName: "maxFailedItems", - nullable: true, - type: { - name: "Number", - }, - }, - maxFailedItemsPerBatch: { - defaultValue: 0, - serializedName: "maxFailedItemsPerBatch", - nullable: true, - type: { - name: "Number", - }, - }, - configuration: { - serializedName: "configuration", - type: { - name: "Composite", - className: "IndexingParametersConfiguration", - }, - }, - }, - }, -}; - -export const IndexingParametersConfiguration: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexingParametersConfiguration", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - parsingMode: { - defaultValue: "default", - serializedName: "parsingMode", - type: { - name: "String", - }, - }, - excludedFileNameExtensions: { - defaultValue: "", - serializedName: "excludedFileNameExtensions", - type: { - name: "String", - }, - }, - indexedFileNameExtensions: { - defaultValue: "", - serializedName: "indexedFileNameExtensions", - type: { - name: "String", - }, - }, - failOnUnsupportedContentType: { - defaultValue: false, - serializedName: "failOnUnsupportedContentType", - type: { - name: "Boolean", - }, - }, - failOnUnprocessableDocument: { - defaultValue: false, - serializedName: "failOnUnprocessableDocument", - type: { - name: "Boolean", - }, - }, - indexStorageMetadataOnlyForOversizedDocuments: { - defaultValue: false, - serializedName: "indexStorageMetadataOnlyForOversizedDocuments", - type: { - name: "Boolean", - }, - }, - delimitedTextHeaders: { - serializedName: "delimitedTextHeaders", - type: { - name: "String", - }, - }, - delimitedTextDelimiter: { - serializedName: "delimitedTextDelimiter", - type: { - name: "String", - }, - }, - firstLineContainsHeaders: { - defaultValue: true, - serializedName: "firstLineContainsHeaders", - type: { - name: "Boolean", - }, - }, - markdownParsingSubmode: { - defaultValue: "oneToMany", - serializedName: "markdownParsingSubmode", - nullable: true, - type: { - name: "String", - }, - }, - markdownHeaderDepth: { - defaultValue: "h6", - serializedName: "markdownHeaderDepth", - nullable: true, - type: { - name: "String", - }, - }, - documentRoot: { - serializedName: "documentRoot", - type: { - name: "String", - }, - }, - dataToExtract: { - defaultValue: "contentAndMetadata", - serializedName: "dataToExtract", - type: { - name: "String", - }, - }, - imageAction: { - defaultValue: "none", - serializedName: "imageAction", - type: { - name: "String", - }, - }, - allowSkillsetToReadFileData: { - defaultValue: false, - serializedName: "allowSkillsetToReadFileData", - type: { - name: "Boolean", - }, - }, - pdfTextRotationAlgorithm: { - defaultValue: "none", - serializedName: "pdfTextRotationAlgorithm", - type: { - name: "String", - }, - }, - executionEnvironment: { - defaultValue: "standard", - serializedName: "executionEnvironment", - type: { - name: "String", - }, - }, - queryTimeout: { - defaultValue: "00:05:00", - serializedName: "queryTimeout", - type: { - name: "String", - }, - }, - }, - 
}, -}; - -export const FieldMapping: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "FieldMapping", - modelProperties: { - sourceFieldName: { - serializedName: "sourceFieldName", - required: true, - type: { - name: "String", - }, - }, - targetFieldName: { - serializedName: "targetFieldName", - type: { - name: "String", - }, - }, - mappingFunction: { - serializedName: "mappingFunction", - type: { - name: "Composite", - className: "FieldMappingFunction", - }, - }, - }, - }, -}; - -export const FieldMappingFunction: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "FieldMappingFunction", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - parameters: { - serializedName: "parameters", - nullable: true, - type: { - name: "Dictionary", - value: { type: { name: "any" } }, - }, - }, - }, - }, -}; - -export const SearchIndexerCache: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerCache", - modelProperties: { - id: { - serializedName: "id", - type: { - name: "String", - }, - }, - storageConnectionString: { - serializedName: "storageConnectionString", - type: { - name: "String", - }, - }, - enableReprocessing: { - serializedName: "enableReprocessing", - nullable: true, - type: { - name: "Boolean", - }, - }, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - }, - }, -}; - -export const ListIndexersResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListIndexersResult", - modelProperties: { - indexers: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexer", - }, - }, - }, - }, - }, - }, -}; - -export const SearchIndexerStatus: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerStatus", - modelProperties: { - name: { - serializedName: "name", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - status: { - serializedName: "status", - required: true, - readOnly: true, - type: { - name: "Enum", - allowedValues: ["unknown", "error", "running"], - }, - }, - runtime: { - serializedName: "runtime", - type: { - name: "Composite", - className: "IndexerRuntime", - }, - }, - lastResult: { - serializedName: "lastResult", - type: { - name: "Composite", - className: "IndexerExecutionResult", - }, - }, - executionHistory: { - serializedName: "executionHistory", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "IndexerExecutionResult", - }, - }, - }, - }, - limits: { - serializedName: "limits", - type: { - name: "Composite", - className: "SearchIndexerLimits", - }, - }, - currentState: { - serializedName: "currentState", - type: { - name: "Composite", - className: "IndexerState", - }, - }, - }, - }, -}; - -export const IndexerRuntime: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexerRuntime", - modelProperties: { - usedSeconds: { - serializedName: "usedSeconds", - required: true, - type: { - name: "Number", - }, - }, - remainingSeconds: { - serializedName: "remainingSeconds", - nullable: true, - type: { - name: "Number", - }, - }, - beginningTime: { - serializedName: "beginningTime", - required: true, - type: { - name: "DateTime", - }, - }, - endingTime: { - serializedName: 
"endingTime", - required: true, - type: { - name: "DateTime", - }, - }, - }, - }, -}; - -export const IndexerExecutionResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexerExecutionResult", - modelProperties: { - status: { - serializedName: "status", - required: true, - readOnly: true, - type: { - name: "Enum", - allowedValues: ["transientFailure", "success", "inProgress", "reset"], - }, - }, - statusDetail: { - serializedName: "statusDetail", - readOnly: true, - type: { - name: "String", - }, - }, - mode: { - serializedName: "mode", - readOnly: true, - type: { - name: "String", - }, - }, - errorMessage: { - serializedName: "errorMessage", - readOnly: true, - type: { - name: "String", - }, - }, - startTime: { - serializedName: "startTime", - readOnly: true, - type: { - name: "DateTime", - }, - }, - endTime: { - serializedName: "endTime", - readOnly: true, - nullable: true, - type: { - name: "DateTime", - }, - }, - errors: { - serializedName: "errors", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerError", - }, - }, - }, - }, - warnings: { - serializedName: "warnings", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerWarning", - }, - }, - }, - }, - itemCount: { - serializedName: "itemsProcessed", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - failedItemCount: { - serializedName: "itemsFailed", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - initialTrackingState: { - serializedName: "initialTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - finalTrackingState: { - serializedName: "finalTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchIndexerError: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerError", - modelProperties: { - key: { - serializedName: "key", - readOnly: true, - type: { - name: "String", - }, - }, - errorMessage: { - serializedName: "errorMessage", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - statusCode: { - serializedName: "statusCode", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - name: { - serializedName: "name", - readOnly: true, - type: { - name: "String", - }, - }, - details: { - serializedName: "details", - readOnly: true, - type: { - name: "String", - }, - }, - documentationLink: { - serializedName: "documentationLink", - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchIndexerWarning: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerWarning", - modelProperties: { - key: { - serializedName: "key", - readOnly: true, - type: { - name: "String", - }, - }, - message: { - serializedName: "message", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - name: { - serializedName: "name", - readOnly: true, - type: { - name: "String", - }, - }, - details: { - serializedName: "details", - readOnly: true, - type: { - name: "String", - }, - }, - documentationLink: { - serializedName: "documentationLink", - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchIndexerLimits: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerLimits", - modelProperties: { - 
maxRunTime: { - serializedName: "maxRunTime", - readOnly: true, - type: { - name: "TimeSpan", - }, - }, - maxDocumentExtractionSize: { - serializedName: "maxDocumentExtractionSize", - readOnly: true, - type: { - name: "Number", - }, - }, - maxDocumentContentCharactersToExtract: { - serializedName: "maxDocumentContentCharactersToExtract", - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const IndexerState: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexerState", - modelProperties: { - mode: { - serializedName: "mode", - readOnly: true, - type: { - name: "String", - }, - }, - allDocsInitialTrackingState: { - serializedName: "allDocsInitialTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - allDocsFinalTrackingState: { - serializedName: "allDocsFinalTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - resetDocsInitialTrackingState: { - serializedName: "resetDocsInitialTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - resetDocsFinalTrackingState: { - serializedName: "resetDocsFinalTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - resetDocumentKeys: { - serializedName: "resetDocumentKeys", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - resetDatasourceDocumentIds: { - serializedName: "resetDatasourceDocumentIds", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - resyncInitialTrackingState: { - serializedName: "resyncInitialTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - resyncFinalTrackingState: { - serializedName: "resyncFinalTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchIndexerSkillset: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerSkillset", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - description: { - serializedName: "description", - type: { - name: "String", - }, - }, - skills: { - serializedName: "skills", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerSkill", - }, - }, - }, - }, - cognitiveServicesAccount: { - serializedName: "cognitiveServices", - type: { - name: "Composite", - className: "CognitiveServicesAccount", - }, - }, - knowledgeStore: { - serializedName: "knowledgeStore", - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStore", - }, - }, - indexProjection: { - serializedName: "indexProjections", - type: { - name: "Composite", - className: "SearchIndexerIndexProjection", - }, - }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String", - }, - }, - encryptionKey: { - serializedName: "encryptionKey", - type: { - name: "Composite", - className: "SearchResourceEncryptionKey", - }, - }, - }, - }, -}; - -export const SearchIndexerSkill: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - name: { - serializedName: "name", - type: { - name: "String", - }, - }, - description: { - serializedName: "description", 
- type: { - name: "String", - }, - }, - context: { - serializedName: "context", - type: { - name: "String", - }, - }, - inputs: { - serializedName: "inputs", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "InputFieldMappingEntry", - }, - }, - }, - }, - outputs: { - serializedName: "outputs", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "OutputFieldMappingEntry", - }, - }, - }, - }, - }, - }, -}; - -export const InputFieldMappingEntry: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "InputFieldMappingEntry", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - source: { - serializedName: "source", - type: { - name: "String", - }, - }, - sourceContext: { - serializedName: "sourceContext", - type: { - name: "String", - }, - }, - inputs: { - serializedName: "inputs", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "InputFieldMappingEntry", - }, - }, - }, - }, - }, - }, -}; - -export const OutputFieldMappingEntry: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "OutputFieldMappingEntry", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - targetName: { - serializedName: "targetName", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const CognitiveServicesAccount: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "CognitiveServicesAccount", - uberParent: "CognitiveServicesAccount", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - description: { - serializedName: "description", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchIndexerKnowledgeStore: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStore", - modelProperties: { - storageConnectionString: { - serializedName: "storageConnectionString", - required: true, - type: { - name: "String", - }, - }, - projections: { - serializedName: "projections", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreProjection", - }, - }, - }, - }, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - parameters: { - serializedName: "parameters", - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreParameters", - }, - }, - }, - }, -}; - -export const SearchIndexerKnowledgeStoreProjection: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreProjection", - modelProperties: { - tables: { - serializedName: "tables", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreTableProjectionSelector", - }, - }, - }, - }, - objects: { - serializedName: "objects", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: - "SearchIndexerKnowledgeStoreObjectProjectionSelector", - }, - }, - }, - }, - files: { - serializedName: "files", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: 
"SearchIndexerKnowledgeStoreFileProjectionSelector", - }, - }, - }, - }, - }, - }, - }; - -export const SearchIndexerKnowledgeStoreProjectionSelector: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreProjectionSelector", - modelProperties: { - referenceKeyName: { - serializedName: "referenceKeyName", - type: { - name: "String", - }, - }, - generatedKeyName: { - serializedName: "generatedKeyName", - type: { - name: "String", - }, - }, - source: { - serializedName: "source", - type: { - name: "String", - }, - }, - sourceContext: { - serializedName: "sourceContext", - type: { - name: "String", - }, - }, - inputs: { - serializedName: "inputs", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "InputFieldMappingEntry", - }, - }, - }, - }, - }, - }, - }; - -export const SearchIndexerKnowledgeStoreParameters: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreParameters", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - synthesizeGeneratedKeyName: { - defaultValue: false, - serializedName: "synthesizeGeneratedKeyName", - type: { - name: "Boolean", - }, - }, - }, - }, - }; - -export const SearchIndexerIndexProjection: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerIndexProjection", - modelProperties: { - selectors: { - serializedName: "selectors", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerIndexProjectionSelector", - }, - }, - }, - }, - parameters: { - serializedName: "parameters", - type: { - name: "Composite", - className: "SearchIndexerIndexProjectionParameters", - }, - }, - }, - }, -}; - -export const SearchIndexerIndexProjectionSelector: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerIndexProjectionSelector", - modelProperties: { - targetIndexName: { - serializedName: "targetIndexName", - required: true, - type: { - name: "String", - }, - }, - parentKeyFieldName: { - serializedName: "parentKeyFieldName", - required: true, - type: { - name: "String", - }, - }, - sourceContext: { - serializedName: "sourceContext", - required: true, - type: { - name: "String", - }, - }, - mappings: { - serializedName: "mappings", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "InputFieldMappingEntry", - }, - }, - }, - }, - }, - }, - }; - -export const SearchIndexerIndexProjectionParameters: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerIndexProjectionParameters", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - projectionMode: { - serializedName: "projectionMode", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const ListSkillsetsResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListSkillsetsResult", - modelProperties: { - skillsets: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexerSkillset", - }, - }, - }, - }, - }, - }, -}; - -export const SkillNames: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SkillNames", - modelProperties: { - skillNames: { - serializedName: "skillNames", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - 
}, - }, - }, - }, -}; - -export const SynonymMap: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SynonymMap", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - format: { - defaultValue: "solr", - isConstant: true, - serializedName: "format", - type: { - name: "String", - }, - }, - synonyms: { - serializedName: "synonyms", - required: true, - type: { - name: "String", - }, - }, - encryptionKey: { - serializedName: "encryptionKey", - type: { - name: "Composite", - className: "SearchResourceEncryptionKey", - }, - }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ListSynonymMapsResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListSynonymMapsResult", - modelProperties: { - synonymMaps: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SynonymMap", - }, - }, - }, - }, - }, - }, -}; - -export const SearchIndex: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndex", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - description: { - serializedName: "description", - type: { - name: "String", - }, - }, - fields: { - serializedName: "fields", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchField", - }, - }, - }, - }, - scoringProfiles: { - serializedName: "scoringProfiles", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "ScoringProfile", - }, - }, - }, - }, - defaultScoringProfile: { - serializedName: "defaultScoringProfile", - type: { - name: "String", - }, - }, - corsOptions: { - serializedName: "corsOptions", - type: { - name: "Composite", - className: "CorsOptions", - }, - }, - suggesters: { - serializedName: "suggesters", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "Suggester", - }, - }, - }, - }, - analyzers: { - serializedName: "analyzers", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "LexicalAnalyzer", - }, - }, - }, - }, - tokenizers: { - serializedName: "tokenizers", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "LexicalTokenizer", - }, - }, - }, - }, - tokenFilters: { - serializedName: "tokenFilters", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "TokenFilter", - }, - }, - }, - }, - charFilters: { - serializedName: "charFilters", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "CharFilter", - }, - }, - }, - }, - normalizers: { - serializedName: "normalizers", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "LexicalNormalizer", - }, - }, - }, - }, - encryptionKey: { - serializedName: "encryptionKey", - type: { - name: "Composite", - className: "SearchResourceEncryptionKey", - }, - }, - similarity: { - serializedName: "similarity", - type: { - name: "Composite", - className: "Similarity", - }, - }, - semanticSearch: { - serializedName: "semantic", - type: { - name: "Composite", - className: "SemanticSearch", - }, - }, - vectorSearch: { - serializedName: "vectorSearch", - type: { - name: "Composite", - className: "VectorSearch", - }, - }, - 
permissionFilterOption: { - serializedName: "permissionFilterOption", - nullable: true, - type: { - name: "String", - }, - }, - purviewEnabled: { - serializedName: "purviewEnabled", - nullable: true, - type: { - name: "Boolean", - }, - }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchField: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchField", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - type: { - serializedName: "type", - required: true, - type: { - name: "String", - }, - }, - key: { - serializedName: "key", - type: { - name: "Boolean", - }, - }, - retrievable: { - serializedName: "retrievable", - type: { - name: "Boolean", - }, - }, - stored: { - serializedName: "stored", - type: { - name: "Boolean", - }, - }, - searchable: { - serializedName: "searchable", - type: { - name: "Boolean", - }, - }, - filterable: { - serializedName: "filterable", - type: { - name: "Boolean", - }, - }, - sortable: { - serializedName: "sortable", - type: { - name: "Boolean", - }, - }, - facetable: { - serializedName: "facetable", - type: { - name: "Boolean", - }, - }, - permissionFilter: { - serializedName: "permissionFilter", - nullable: true, - type: { - name: "String", - }, - }, - sensitivityLabel: { - serializedName: "sensitivityLabel", - nullable: true, - type: { - name: "Boolean", - }, - }, - analyzer: { - serializedName: "analyzer", - nullable: true, - type: { - name: "String", - }, - }, - searchAnalyzer: { - serializedName: "searchAnalyzer", - nullable: true, - type: { - name: "String", - }, - }, - indexAnalyzer: { - serializedName: "indexAnalyzer", - nullable: true, - type: { - name: "String", - }, - }, - normalizer: { - serializedName: "normalizer", - nullable: true, - type: { - name: "String", - }, - }, - vectorSearchDimensions: { - constraints: { - InclusiveMaximum: 4096, - InclusiveMinimum: 2, - }, - serializedName: "dimensions", - nullable: true, - type: { - name: "Number", - }, - }, - vectorSearchProfileName: { - serializedName: "vectorSearchProfile", - nullable: true, - type: { - name: "String", - }, - }, - vectorEncodingFormat: { - serializedName: "vectorEncoding", - nullable: true, - type: { - name: "String", - }, - }, - synonymMaps: { - serializedName: "synonymMaps", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - fields: { - serializedName: "fields", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchField", - }, - }, - }, - }, - }, - }, -}; - -export const ScoringProfile: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ScoringProfile", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - textWeights: { - serializedName: "text", - type: { - name: "Composite", - className: "TextWeights", - }, - }, - functions: { - serializedName: "functions", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "ScoringFunction", - }, - }, - }, - }, - functionAggregation: { - serializedName: "functionAggregation", - type: { - name: "Enum", - allowedValues: [ - "sum", - "average", - "minimum", - "maximum", - "firstMatching", - "product", - ], - }, - }, - }, - }, -}; - -export const TextWeights: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "TextWeights", - modelProperties: { - weights: { - 
serializedName: "weights", - required: true, - type: { - name: "Dictionary", - value: { type: { name: "Number" } }, - }, - }, - }, - }, -}; - -export const ScoringFunction: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ScoringFunction", - uberParent: "ScoringFunction", - polymorphicDiscriminator: { - serializedName: "type", - clientName: "type", - }, - modelProperties: { - type: { - serializedName: "type", - required: true, - type: { - name: "String", - }, - }, - fieldName: { - serializedName: "fieldName", - required: true, - type: { - name: "String", - }, - }, - boost: { - serializedName: "boost", - required: true, - type: { - name: "Number", - }, - }, - interpolation: { - serializedName: "interpolation", - type: { - name: "Enum", - allowedValues: ["linear", "constant", "quadratic", "logarithmic"], - }, - }, - }, - }, -}; - -export const CorsOptions: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "CorsOptions", - modelProperties: { - allowedOrigins: { - serializedName: "allowedOrigins", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - maxAgeInSeconds: { - serializedName: "maxAgeInSeconds", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const Suggester: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "Suggester", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - searchMode: { - defaultValue: "analyzingInfixMatching", - isConstant: true, - serializedName: "searchMode", - type: { - name: "String", - }, - }, - sourceFields: { - serializedName: "sourceFields", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const LexicalAnalyzer: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "LexicalAnalyzer", - uberParent: "LexicalAnalyzer", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const LexicalTokenizer: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "LexicalTokenizer", - uberParent: "LexicalTokenizer", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const TokenFilter: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "TokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const CharFilter: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "CharFilter", - uberParent: "CharFilter", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - 
modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const LexicalNormalizer: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "LexicalNormalizer", - uberParent: "LexicalNormalizer", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const Similarity: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "Similarity", - uberParent: "Similarity", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SemanticSearch: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SemanticSearch", - modelProperties: { - defaultConfigurationName: { - serializedName: "defaultConfiguration", - type: { - name: "String", - }, - }, - configurations: { - serializedName: "configurations", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SemanticConfiguration", - }, - }, - }, - }, - }, - }, -}; - -export const SemanticConfiguration: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SemanticConfiguration", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - prioritizedFields: { - serializedName: "prioritizedFields", - type: { - name: "Composite", - className: "SemanticPrioritizedFields", - }, - }, - rankingOrder: { - serializedName: "rankingOrder", - nullable: true, - type: { - name: "String", - }, - }, - flightingOptIn: { - defaultValue: false, - serializedName: "flightingOptIn", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const SemanticPrioritizedFields: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SemanticPrioritizedFields", - modelProperties: { - titleField: { - serializedName: "titleField", - type: { - name: "Composite", - className: "SemanticField", - }, - }, - contentFields: { - serializedName: "prioritizedContentFields", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SemanticField", - }, - }, - }, - }, - keywordsFields: { - serializedName: "prioritizedKeywordsFields", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SemanticField", - }, - }, - }, - }, - }, - }, -}; - -export const SemanticField: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SemanticField", - modelProperties: { - name: { - serializedName: "fieldName", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorSearch: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "VectorSearch", - modelProperties: { - profiles: { - serializedName: "profiles", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "VectorSearchProfile", - }, - }, - }, - }, - algorithms: { - serializedName: "algorithms", - type: { - name: "Sequence", - element: { - 
type: { - name: "Composite", - className: "VectorSearchAlgorithmConfiguration", - }, - }, - }, - }, - vectorizers: { - serializedName: "vectorizers", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "VectorSearchVectorizer", - }, - }, - }, - }, - compressions: { - serializedName: "compressions", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "VectorSearchCompression", - }, - }, - }, - }, - }, - }, -}; - -export const VectorSearchProfile: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "VectorSearchProfile", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - algorithmConfigurationName: { - serializedName: "algorithm", - required: true, - type: { - name: "String", - }, - }, - vectorizerName: { - serializedName: "vectorizer", - type: { - name: "String", - }, - }, - compressionName: { - serializedName: "compression", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorSearchAlgorithmConfiguration: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "VectorSearchAlgorithmConfiguration", - uberParent: "VectorSearchAlgorithmConfiguration", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorSearchVectorizer: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "VectorSearchVectorizer", - uberParent: "VectorSearchVectorizer", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - vectorizerName: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorSearchCompression: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "VectorSearchCompression", - uberParent: "VectorSearchCompression", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - compressionName: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - rescoringOptions: { - serializedName: "rescoringOptions", - type: { - name: "Composite", - className: "RescoringOptions", - }, - }, - truncationDimension: { - serializedName: "truncationDimension", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const RescoringOptions: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "RescoringOptions", - modelProperties: { - enableRescoring: { - defaultValue: true, - serializedName: "enableRescoring", - nullable: true, - type: { - name: "Boolean", - }, - }, - defaultOversampling: { - serializedName: "defaultOversampling", - nullable: true, - type: { - name: "Number", - }, - }, - rescoreStorageMethod: { - serializedName: "rescoreStorageMethod", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ListIndexesResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListIndexesResult", - modelProperties: { - indexes: { - serializedName: "value", - 
required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndex", - }, - }, - }, - }, - }, - }, -}; - -export const GetIndexStatisticsResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "GetIndexStatisticsResult", - modelProperties: { - documentCount: { - serializedName: "documentCount", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - storageSize: { - serializedName: "storageSize", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - vectorIndexSize: { - serializedName: "vectorIndexSize", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const AnalyzeRequest: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AnalyzeRequest", - modelProperties: { - text: { - serializedName: "text", - required: true, - type: { - name: "String", - }, - }, - analyzer: { - serializedName: "analyzer", - type: { - name: "String", - }, - }, - tokenizer: { - serializedName: "tokenizer", - type: { - name: "String", - }, - }, - normalizer: { - serializedName: "normalizer", - type: { - name: "String", - }, - }, - tokenFilters: { - serializedName: "tokenFilters", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - charFilters: { - serializedName: "charFilters", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const AnalyzeResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AnalyzeResult", - modelProperties: { - tokens: { - serializedName: "tokens", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "AnalyzedTokenInfo", - }, - }, - }, - }, - }, - }, -}; - -export const AnalyzedTokenInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AnalyzedTokenInfo", - modelProperties: { - token: { - serializedName: "token", - required: true, - readOnly: true, - type: { - name: "String", - }, - }, - startOffset: { - serializedName: "startOffset", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - endOffset: { - serializedName: "endOffset", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - position: { - serializedName: "position", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const SearchAlias: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchAlias", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - indexes: { - serializedName: "indexes", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ListAliasesResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListAliasesResult", - modelProperties: { - aliases: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchAlias", - }, - }, - }, - }, - }, - }, -}; - -export const ServiceStatistics: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ServiceStatistics", - modelProperties: { - counters: { - serializedName: "counters", - type: 
{ - name: "Composite", - className: "ServiceCounters", - }, - }, - indexersRuntime: { - serializedName: "indexersRuntime", - type: { - name: "Composite", - className: "ServiceIndexersRuntime", - }, - }, - limits: { - serializedName: "limits", - type: { - name: "Composite", - className: "ServiceLimits", - }, - }, - }, - }, -}; - -export const ServiceCounters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ServiceCounters", - modelProperties: { - aliasCounter: { - serializedName: "aliasesCount", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, - documentCounter: { - serializedName: "documentCount", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, - indexCounter: { - serializedName: "indexesCount", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, - indexerCounter: { - serializedName: "indexersCount", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, - dataSourceCounter: { - serializedName: "dataSourcesCount", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, - storageSizeCounter: { - serializedName: "storageSize", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, - synonymMapCounter: { - serializedName: "synonymMaps", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, - skillsetCounter: { - serializedName: "skillsetCount", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, - vectorIndexSizeCounter: { - serializedName: "vectorIndexSize", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, - }, - }, -}; - -export const ResourceCounter: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ResourceCounter", - modelProperties: { - usage: { - serializedName: "usage", - required: true, - type: { - name: "Number", - }, - }, - quota: { - serializedName: "quota", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const ServiceIndexersRuntime: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ServiceIndexersRuntime", - modelProperties: { - usedSeconds: { - serializedName: "usedSeconds", - required: true, - type: { - name: "Number", - }, - }, - remainingSeconds: { - serializedName: "remainingSeconds", - nullable: true, - type: { - name: "Number", - }, - }, - beginningTime: { - serializedName: "beginningTime", - required: true, - type: { - name: "DateTime", - }, - }, - endingTime: { - serializedName: "endingTime", - required: true, - type: { - name: "DateTime", - }, - }, - }, - }, -}; - -export const ServiceLimits: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ServiceLimits", - modelProperties: { - maxFieldsPerIndex: { - serializedName: "maxFieldsPerIndex", - nullable: true, - type: { - name: "Number", - }, - }, - maxFieldNestingDepthPerIndex: { - serializedName: "maxFieldNestingDepthPerIndex", - nullable: true, - type: { - name: "Number", - }, - }, - maxComplexCollectionFieldsPerIndex: { - serializedName: "maxComplexCollectionFieldsPerIndex", - nullable: true, - type: { - name: "Number", - }, - }, - maxComplexObjectsInCollectionsPerDocument: { - serializedName: "maxComplexObjectsInCollectionsPerDocument", - nullable: true, - type: { - name: "Number", - }, - }, - maxStoragePerIndexInBytes: { - serializedName: "maxStoragePerIndex", - nullable: true, - type: { - name: "Number", - }, - }, - maxCumulativeIndexerRuntimeSeconds: { - serializedName: 
"maxCumulativeIndexerRuntimeSeconds", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const ListIndexStatsSummary: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListIndexStatsSummary", - modelProperties: { - indexesStatistics: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "IndexStatisticsSummary", - }, - }, - }, - }, - }, - }, -}; - -export const IndexStatisticsSummary: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexStatisticsSummary", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - documentCount: { - serializedName: "documentCount", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - storageSize: { - serializedName: "storageSize", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - vectorIndexSize: { - serializedName: "vectorIndexSize", - required: true, - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const AzureOpenAIParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AzureOpenAIParameters", - modelProperties: { - resourceUrl: { - serializedName: "resourceUri", - type: { - name: "String", - }, - }, - deploymentId: { - serializedName: "deploymentId", - type: { - name: "String", - }, - }, - apiKey: { - serializedName: "apiKey", - type: { - name: "String", - }, - }, - authIdentity: { - serializedName: "authIdentity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - modelName: { - serializedName: "modelName", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchIndexKnowledgeSourceParameters: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexKnowledgeSourceParameters", - modelProperties: { - searchIndexName: { - serializedName: "searchIndexName", - required: true, - type: { - name: "String", - }, - }, - sourceDataFields: { - serializedName: "sourceDataFields", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexFieldReference", - }, - }, - }, - }, - searchFields: { - serializedName: "searchFields", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchIndexFieldReference", - }, - }, - }, - }, - semanticConfigurationName: { - serializedName: "semanticConfigurationName", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const SearchIndexFieldReference: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexFieldReference", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const AzureBlobKnowledgeSourceParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AzureBlobKnowledgeSourceParameters", - modelProperties: { - connectionString: { - serializedName: "connectionString", - required: true, - type: { - name: "String", - }, - }, - containerName: { - serializedName: "containerName", - required: true, - type: { - name: "String", - }, - }, - folderPath: { - serializedName: "folderPath", - nullable: true, - type: { - name: "String", - }, - }, - isAdlsGen2: { - defaultValue: false, - serializedName: "isADLSGen2", - type: { - name: "Boolean", - }, - }, - ingestionParameters: { - 
serializedName: "ingestionParameters", - type: { - name: "Composite", - className: "KnowledgeSourceIngestionParameters", - }, - }, - createdResources: { - serializedName: "createdResources", - readOnly: true, - type: { - name: "Dictionary", - value: { type: { name: "String" } }, - }, - }, - }, - }, -}; - -export const KnowledgeSourceIngestionParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeSourceIngestionParameters", - modelProperties: { - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - embeddingModel: { - serializedName: "embeddingModel", - type: { - name: "Composite", - className: "KnowledgeSourceVectorizer", - }, - }, - chatCompletionModel: { - serializedName: "chatCompletionModel", - type: { - name: "Composite", - className: "KnowledgeBaseModel", - }, - }, - disableImageVerbalization: { - defaultValue: false, - serializedName: "disableImageVerbalization", - type: { - name: "Boolean", - }, - }, - ingestionSchedule: { - serializedName: "ingestionSchedule", - type: { - name: "Composite", - className: "IndexingSchedule", - }, - }, - ingestionPermissionOptions: { - serializedName: "ingestionPermissionOptions", - nullable: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - contentExtractionMode: { - defaultValue: "minimal", - serializedName: "contentExtractionMode", - nullable: true, - type: { - name: "String", - }, - }, - aiServices: { - serializedName: "aiServices", - type: { - name: "Composite", - className: "AIServices", - }, - }, - }, - }, -}; - -export const KnowledgeSourceVectorizer: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "KnowledgeSourceVectorizer", - uberParent: "KnowledgeSourceVectorizer", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const AIServices: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AIServices", - modelProperties: { - uri: { - serializedName: "uri", - required: true, - type: { - name: "String", - }, - }, - apiKey: { - serializedName: "apiKey", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const IndexedSharePointKnowledgeSourceParameters: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "IndexedSharePointKnowledgeSourceParameters", - modelProperties: { - connectionString: { - serializedName: "connectionString", - required: true, - type: { - name: "String", - }, - }, - containerName: { - serializedName: "containerName", - required: true, - type: { - name: "String", - }, - }, - query: { - serializedName: "query", - nullable: true, - type: { - name: "String", - }, - }, - ingestionParameters: { - serializedName: "ingestionParameters", - type: { - name: "Composite", - className: "KnowledgeSourceIngestionParameters", - }, - }, - createdResources: { - serializedName: "createdResources", - readOnly: true, - type: { - name: "Dictionary", - value: { type: { name: "String" } }, - }, - }, - }, - }, - }; - -export const IndexedOneLakeKnowledgeSourceParameters: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "IndexedOneLakeKnowledgeSourceParameters", - modelProperties: { - fabricWorkspaceId: { - serializedName: "fabricWorkspaceId", - required: true, - type: { - name: "String", - }, - }, - lakehouseId: { 
- serializedName: "lakehouseId", - required: true, - type: { - name: "String", - }, - }, - targetPath: { - serializedName: "targetPath", - nullable: true, - type: { - name: "String", - }, - }, - ingestionParameters: { - serializedName: "ingestionParameters", - type: { - name: "Composite", - className: "KnowledgeSourceIngestionParameters", - }, - }, - createdResources: { - serializedName: "createdResources", - readOnly: true, - type: { - name: "Dictionary", - value: { type: { name: "String" } }, - }, - }, - }, - }, - }; - -export const WebKnowledgeSourceParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "WebKnowledgeSourceParameters", - modelProperties: { - domains: { - serializedName: "domains", - type: { - name: "Composite", - className: "WebKnowledgeSourceDomains", - }, - }, - }, - }, -}; - -export const WebKnowledgeSourceDomains: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "WebKnowledgeSourceDomains", - modelProperties: { - allowedDomains: { - serializedName: "allowedDomains", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "WebKnowledgeSourceDomain", - }, - }, - }, - }, - blockedDomains: { - serializedName: "blockedDomains", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "WebKnowledgeSourceDomain", - }, - }, - }, - }, - }, - }, -}; - -export const WebKnowledgeSourceDomain: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "WebKnowledgeSourceDomain", - modelProperties: { - address: { - serializedName: "address", - required: true, - type: { - name: "String", - }, - }, - includeSubpages: { - serializedName: "includeSubpages", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const RemoteSharePointKnowledgeSourceParameters: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "RemoteSharePointKnowledgeSourceParameters", - modelProperties: { - filterExpression: { - serializedName: "filterExpression", - type: { - name: "String", - }, - }, - resourceMetadata: { - serializedName: "resourceMetadata", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - containerTypeId: { - serializedName: "containerTypeId", - type: { - name: "String", - }, - }, - }, - }, - }; - -export const HnswParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "HnswParameters", - modelProperties: { - m: { - defaultValue: 4, - constraints: { - InclusiveMaximum: 10, - InclusiveMinimum: 4, - }, - serializedName: "m", - nullable: true, - type: { - name: "Number", - }, - }, - efConstruction: { - defaultValue: 400, - constraints: { - InclusiveMaximum: 1000, - InclusiveMinimum: 100, - }, - serializedName: "efConstruction", - nullable: true, - type: { - name: "Number", - }, - }, - efSearch: { - defaultValue: 500, - constraints: { - InclusiveMaximum: 1000, - InclusiveMinimum: 100, - }, - serializedName: "efSearch", - nullable: true, - type: { - name: "Number", - }, - }, - metric: { - serializedName: "metric", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ExhaustiveKnnParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ExhaustiveKnnParameters", - modelProperties: { - metric: { - serializedName: "metric", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ScalarQuantizationParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - 
className: "ScalarQuantizationParameters", - modelProperties: { - quantizedDataType: { - serializedName: "quantizedDataType", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const WebApiParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "WebApiParameters", - modelProperties: { - uri: { - serializedName: "uri", - type: { - name: "String", - }, - }, - httpHeaders: { - serializedName: "httpHeaders", - type: { - name: "Dictionary", - value: { type: { name: "String" } }, - }, - }, - httpMethod: { - serializedName: "httpMethod", - type: { - name: "String", - }, - }, - timeout: { - serializedName: "timeout", - type: { - name: "TimeSpan", - }, - }, - authResourceId: { - serializedName: "authResourceId", - nullable: true, - type: { - name: "String", - }, - }, - authIdentity: { - serializedName: "authIdentity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - }, - }, -}; - -export const AIServicesVisionParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AIServicesVisionParameters", - modelProperties: { - modelVersion: { - serializedName: "modelVersion", - required: true, - nullable: true, - type: { - name: "String", - }, - }, - resourceUri: { - serializedName: "resourceUri", - required: true, - type: { - name: "String", - }, - }, - apiKey: { - serializedName: "apiKey", - type: { - name: "String", - }, - }, - authIdentity: { - serializedName: "authIdentity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - }, - }, -}; - -export const AMLParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AMLParameters", - modelProperties: { - scoringUri: { - serializedName: "uri", - required: true, - nullable: true, - type: { - name: "String", - }, - }, - authenticationKey: { - serializedName: "key", - nullable: true, - type: { - name: "String", - }, - }, - resourceId: { - serializedName: "resourceId", - nullable: true, - type: { - name: "String", - }, - }, - timeout: { - serializedName: "timeout", - nullable: true, - type: { - name: "TimeSpan", - }, - }, - region: { - serializedName: "region", - nullable: true, - type: { - name: "String", - }, - }, - modelName: { - serializedName: "modelName", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const DistanceScoringParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DistanceScoringParameters", - modelProperties: { - referencePointParameter: { - serializedName: "referencePointParameter", - required: true, - type: { - name: "String", - }, - }, - boostingDistance: { - serializedName: "boostingDistance", - required: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const FreshnessScoringParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "FreshnessScoringParameters", - modelProperties: { - boostingDuration: { - serializedName: "boostingDuration", - required: true, - type: { - name: "TimeSpan", - }, - }, - }, - }, -}; - -export const MagnitudeScoringParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "MagnitudeScoringParameters", - modelProperties: { - boostingRangeStart: { - serializedName: "boostingRangeStart", - required: true, - type: { - name: "Number", - }, - }, - boostingRangeEnd: { - serializedName: "boostingRangeEnd", - required: true, - type: { - name: "Number", - }, - }, - shouldBoostBeyondRangeByConstant: { - 
serializedName: "constantBoostBeyondRange", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const TagScoringParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "TagScoringParameters", - modelProperties: { - tagsParameter: { - serializedName: "tagsParameter", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const CustomEntity: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "CustomEntity", - modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - description: { - serializedName: "description", - nullable: true, - type: { - name: "String", - }, - }, - type: { - serializedName: "type", - nullable: true, - type: { - name: "String", - }, - }, - subtype: { - serializedName: "subtype", - nullable: true, - type: { - name: "String", - }, - }, - id: { - serializedName: "id", - nullable: true, - type: { - name: "String", - }, - }, - caseSensitive: { - serializedName: "caseSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - accentSensitive: { - serializedName: "accentSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - fuzzyEditDistance: { - serializedName: "fuzzyEditDistance", - nullable: true, - type: { - name: "Number", - }, - }, - defaultCaseSensitive: { - serializedName: "defaultCaseSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - defaultAccentSensitive: { - serializedName: "defaultAccentSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - defaultFuzzyEditDistance: { - serializedName: "defaultFuzzyEditDistance", - nullable: true, - type: { - name: "Number", - }, - }, - aliases: { - serializedName: "aliases", - nullable: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "CustomEntityAlias", - }, - }, - }, - }, - }, - }, -}; - -export const CustomEntityAlias: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "CustomEntityAlias", - modelProperties: { - text: { - serializedName: "text", - required: true, - type: { - name: "String", - }, - }, - caseSensitive: { - serializedName: "caseSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - accentSensitive: { - serializedName: "accentSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - fuzzyEditDistance: { - serializedName: "fuzzyEditDistance", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const AzureOpenAITokenizerParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AzureOpenAITokenizerParameters", - modelProperties: { - encoderModelName: { - serializedName: "encoderModelName", - nullable: true, - type: { - name: "String", - }, - }, - allowedSpecialTokens: { - serializedName: "allowedSpecialTokens", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const DocumentIntelligenceLayoutSkillChunkingProperties: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "DocumentIntelligenceLayoutSkillChunkingProperties", - modelProperties: { - unit: { - defaultValue: "characters", - serializedName: "unit", - nullable: true, - type: { - name: "String", - }, - }, - maximumLength: { - serializedName: "maximumLength", - nullable: true, - type: { - name: "Number", - }, - }, - overlapLength: { - serializedName: "overlapLength", - nullable: true, - type: { - name: "Number", - }, - 
-        },
-      },
-    },
-  };
-
-export const CommonModelParameters: coreClient.CompositeMapper = {
-  type: {
-    name: "Composite",
-    className: "CommonModelParameters",
-    modelProperties: {
-      model: {
-        serializedName: "model",
-        nullable: true,
-        type: {
-          name: "String",
-        },
-      },
-      frequencyPenalty: {
-        defaultValue: 0,
-        serializedName: "frequencyPenalty",
-        nullable: true,
-        type: {
-          name: "Number",
-        },
-      },
-      presencePenalty: {
-        defaultValue: 0,
-        serializedName: "presencePenalty",
-        nullable: true,
-        type: {
-          name: "Number",
-        },
-      },
-      maxTokens: {
-        serializedName: "maxTokens",
-        nullable: true,
-        type: {
-          name: "Number",
-        },
-      },
-      temperature: {
-        defaultValue: 0.7,
-        serializedName: "temperature",
-        nullable: true,
-        type: {
-          name: "Number",
-        },
-      },
-      seed: {
-        serializedName: "seed",
-        nullable: true,
-        type: {
-          name: "Number",
-        },
-      },
-      stop: {
-        serializedName: "stop",
-        nullable: true,
-        type: {
-          name: "Sequence",
-          element: {
-            type: {
-              name: "String",
-            },
-          },
-        },
-      },
-    },
-  },
-};
-
-export const ChatCompletionResponseFormat: coreClient.CompositeMapper = {
-  type: {
-    name: "Composite",
-    className: "ChatCompletionResponseFormat",
-    modelProperties: {
-      type: {
-        defaultValue: "text",
-        serializedName: "type",
-        type: {
-          name: "String",
-        },
-      },
-      chatCompletionSchemaProperties: {
-        serializedName: "jsonSchemaProperties",
-        type: {
-          name: "Composite",
-          className: "ChatCompletionResponseFormatJsonSchemaProperties",
-        },
-      },
-    },
-  },
-};
-
-export const ChatCompletionResponseFormatJsonSchemaProperties: coreClient.CompositeMapper =
-  {
-    type: {
-      name: "Composite",
-      className: "ChatCompletionResponseFormatJsonSchemaProperties",
-      modelProperties: {
-        name: {
-          serializedName: "name",
-          nullable: true,
-          type: {
-            name: "String",
-          },
-        },
-        description: {
-          serializedName: "description",
-          nullable: true,
-          type: {
-            name: "String",
-          },
-        },
-        strict: {
-          defaultValue: true,
-          serializedName: "strict",
-          type: {
-            name: "Boolean",
-          },
-        },
-        schema: {
-          serializedName: "schema",
-          type: {
-            name: "Composite",
-            className: "ChatCompletionSchema",
-          },
-        },
-      },
-    },
-  };
-
-export const ChatCompletionSchema: coreClient.CompositeMapper = {
-  type: {
-    name: "Composite",
-    className: "ChatCompletionSchema",
-    modelProperties: {
-      type: {
-        defaultValue: "object",
-        serializedName: "type",
-        type: {
-          name: "String",
-        },
-      },
-      properties: {
-        serializedName: "properties",
-        type: {
-          name: "String",
-        },
-      },
-      required: {
-        serializedName: "required",
-        type: {
-          name: "Sequence",
-          element: {
-            type: {
-              name: "String",
-            },
-          },
-        },
-      },
-      additionalProperties: {
-        defaultValue: false,
-        serializedName: "additionalProperties",
-        type: {
-          name: "Boolean",
-        },
-      },
-    },
-  },
-};
-
-export const ContentUnderstandingSkillChunkingProperties: coreClient.CompositeMapper =
-  {
-    type: {
-      name: "Composite",
-      className: "ContentUnderstandingSkillChunkingProperties",
-      modelProperties: {
-        unit: {
-          defaultValue: "characters",
-          serializedName: "unit",
-          nullable: true,
-          type: {
-            name: "String",
-          },
-        },
-        maximumLength: {
-          serializedName: "maximumLength",
-          nullable: true,
-          type: {
-            name: "Number",
-          },
-        },
-        overlapLength: {
-          serializedName: "overlapLength",
-          nullable: true,
-          type: {
-            name: "Number",
-          },
-        },
-      },
-    },
-  };
-
-export const KnowledgeBaseAzureOpenAIModel: coreClient.CompositeMapper = {
-  serializedName: "azureOpenAI",
-  type: {
-    name: "Composite",
-    className: "KnowledgeBaseAzureOpenAIModel",
-    uberParent: "KnowledgeBaseModel",
-    polymorphicDiscriminator: KnowledgeBaseModel.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...KnowledgeBaseModel.type.modelProperties,
-      azureOpenAIParameters: {
-        serializedName: "azureOpenAIParameters",
-        type: {
-          name: "Composite",
-          className: "AzureOpenAIParameters",
-        },
-      },
-    },
-  },
-};
-
-export const KnowledgeRetrievalMinimalReasoningEffort: coreClient.CompositeMapper =
-  {
-    serializedName: "minimal",
-    type: {
-      name: "Composite",
-      className: "KnowledgeRetrievalMinimalReasoningEffort",
-      uberParent: "KnowledgeRetrievalReasoningEffort",
-      polymorphicDiscriminator:
-        KnowledgeRetrievalReasoningEffort.type.polymorphicDiscriminator,
-      modelProperties: {
-        ...KnowledgeRetrievalReasoningEffort.type.modelProperties,
-      },
-    },
-  };
-
-export const KnowledgeRetrievalLowReasoningEffort: coreClient.CompositeMapper =
-  {
-    serializedName: "low",
-    type: {
-      name: "Composite",
-      className: "KnowledgeRetrievalLowReasoningEffort",
-      uberParent: "KnowledgeRetrievalReasoningEffort",
-      polymorphicDiscriminator:
-        KnowledgeRetrievalReasoningEffort.type.polymorphicDiscriminator,
-      modelProperties: {
-        ...KnowledgeRetrievalReasoningEffort.type.modelProperties,
-      },
-    },
-  };
-
-export const KnowledgeRetrievalMediumReasoningEffort: coreClient.CompositeMapper =
-  {
-    serializedName: "medium",
-    type: {
-      name: "Composite",
-      className: "KnowledgeRetrievalMediumReasoningEffort",
-      uberParent: "KnowledgeRetrievalReasoningEffort",
-      polymorphicDiscriminator:
-        KnowledgeRetrievalReasoningEffort.type.polymorphicDiscriminator,
-      modelProperties: {
-        ...KnowledgeRetrievalReasoningEffort.type.modelProperties,
-      },
-    },
-  };
-
-export const SearchIndexerDataNoneIdentity: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.DataNoneIdentity",
-  type: {
-    name: "Composite",
-    className: "SearchIndexerDataNoneIdentity",
-    uberParent: "SearchIndexerDataIdentity",
-    polymorphicDiscriminator:
-      SearchIndexerDataIdentity.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...SearchIndexerDataIdentity.type.modelProperties,
-    },
-  },
-};
-
-export const SearchIndexerDataUserAssignedIdentity: coreClient.CompositeMapper =
-  {
-    serializedName: "#Microsoft.Azure.Search.DataUserAssignedIdentity",
-    type: {
-      name: "Composite",
-      className: "SearchIndexerDataUserAssignedIdentity",
-      uberParent: "SearchIndexerDataIdentity",
-      polymorphicDiscriminator:
-        SearchIndexerDataIdentity.type.polymorphicDiscriminator,
-      modelProperties: {
-        ...SearchIndexerDataIdentity.type.modelProperties,
-        resourceId: {
-          serializedName: "userAssignedIdentity",
-          required: true,
-          type: {
-            name: "String",
-          },
-        },
-      },
-    },
-  };
-
-export const SearchIndexKnowledgeSource: coreClient.CompositeMapper = {
-  serializedName: "searchIndex",
-  type: {
-    name: "Composite",
-    className: "SearchIndexKnowledgeSource",
-    uberParent: "KnowledgeSource",
-    polymorphicDiscriminator: KnowledgeSource.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...KnowledgeSource.type.modelProperties,
-      searchIndexParameters: {
-        serializedName: "searchIndexParameters",
-        type: {
-          name: "Composite",
-          className: "SearchIndexKnowledgeSourceParameters",
-        },
-      },
-    },
-  },
-};
-
-export const AzureBlobKnowledgeSource: coreClient.CompositeMapper = {
-  serializedName: "azureBlob",
-  type: {
-    name: "Composite",
-    className: "AzureBlobKnowledgeSource",
-    uberParent: "KnowledgeSource",
-    polymorphicDiscriminator: KnowledgeSource.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...KnowledgeSource.type.modelProperties,
-      azureBlobParameters: {
-        serializedName: "azureBlobParameters",
-        type: {
-          name: "Composite",
-          className: "AzureBlobKnowledgeSourceParameters",
-        },
-      },
-    },
-  },
-};
-
-export const IndexedSharePointKnowledgeSource: coreClient.CompositeMapper = {
-  serializedName: "indexedSharePoint",
-  type: {
-    name: "Composite",
-    className: "IndexedSharePointKnowledgeSource",
-    uberParent: "KnowledgeSource",
-    polymorphicDiscriminator: KnowledgeSource.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...KnowledgeSource.type.modelProperties,
-      indexedSharePointParameters: {
-        serializedName: "indexedSharePointParameters",
-        type: {
-          name: "Composite",
-          className: "IndexedSharePointKnowledgeSourceParameters",
-        },
-      },
-    },
-  },
-};
-
-export const IndexedOneLakeKnowledgeSource: coreClient.CompositeMapper = {
-  serializedName: "indexedOneLake",
-  type: {
-    name: "Composite",
-    className: "IndexedOneLakeKnowledgeSource",
-    uberParent: "KnowledgeSource",
-    polymorphicDiscriminator: KnowledgeSource.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...KnowledgeSource.type.modelProperties,
-      indexedOneLakeParameters: {
-        serializedName: "indexedOneLakeParameters",
-        type: {
-          name: "Composite",
-          className: "IndexedOneLakeKnowledgeSourceParameters",
-        },
-      },
-    },
-  },
-};
-
-export const WebKnowledgeSource: coreClient.CompositeMapper = {
-  serializedName: "web",
-  type: {
-    name: "Composite",
-    className: "WebKnowledgeSource",
-    uberParent: "KnowledgeSource",
-    polymorphicDiscriminator: KnowledgeSource.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...KnowledgeSource.type.modelProperties,
-      webParameters: {
-        serializedName: "webParameters",
-        type: {
-          name: "Composite",
-          className: "WebKnowledgeSourceParameters",
-        },
-      },
-    },
-  },
-};
-
-export const RemoteSharePointKnowledgeSource: coreClient.CompositeMapper = {
-  serializedName: "remoteSharePoint",
-  type: {
-    name: "Composite",
-    className: "RemoteSharePointKnowledgeSource",
-    uberParent: "KnowledgeSource",
-    polymorphicDiscriminator: KnowledgeSource.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...KnowledgeSource.type.modelProperties,
-      remoteSharePointParameters: {
-        serializedName: "remoteSharePointParameters",
-        type: {
-          name: "Composite",
-          className: "RemoteSharePointKnowledgeSourceParameters",
-        },
-      },
-    },
-  },
-};
-
-export const HighWaterMarkChangeDetectionPolicy: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
-  type: {
-    name: "Composite",
-    className: "HighWaterMarkChangeDetectionPolicy",
-    uberParent: "DataChangeDetectionPolicy",
-    polymorphicDiscriminator:
-      DataChangeDetectionPolicy.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...DataChangeDetectionPolicy.type.modelProperties,
-      highWaterMarkColumnName: {
-        serializedName: "highWaterMarkColumnName",
-        required: true,
-        type: {
-          name: "String",
-        },
-      },
-    },
-  },
-};
-
-export const SqlIntegratedChangeTrackingPolicy: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy",
-  type: {
-    name: "Composite",
-    className: "SqlIntegratedChangeTrackingPolicy",
-    uberParent: "DataChangeDetectionPolicy",
-    polymorphicDiscriminator:
-      DataChangeDetectionPolicy.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...DataChangeDetectionPolicy.type.modelProperties,
-    },
-  },
-};
-
-export const SoftDeleteColumnDeletionDetectionPolicy: coreClient.CompositeMapper =
-  {
-    serializedName:
-      "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
-    type: {
-      name: "Composite",
-      className: "SoftDeleteColumnDeletionDetectionPolicy",
-      uberParent: "DataDeletionDetectionPolicy",
-      polymorphicDiscriminator:
-        DataDeletionDetectionPolicy.type.polymorphicDiscriminator,
-      modelProperties: {
-        ...DataDeletionDetectionPolicy.type.modelProperties,
-        softDeleteColumnName: {
-          serializedName: "softDeleteColumnName",
-          type: {
-            name: "String",
-          },
-        },
-        softDeleteMarkerValue: {
-          serializedName: "softDeleteMarkerValue",
-          type: {
-            name: "String",
-          },
-        },
-      },
-    },
-  };
-
-export const NativeBlobSoftDeleteDeletionDetectionPolicy: coreClient.CompositeMapper =
-  {
-    serializedName:
-      "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy",
-    type: {
-      name: "Composite",
-      className: "NativeBlobSoftDeleteDeletionDetectionPolicy",
-      uberParent: "DataDeletionDetectionPolicy",
-      polymorphicDiscriminator:
-        DataDeletionDetectionPolicy.type.polymorphicDiscriminator,
-      modelProperties: {
-        ...DataDeletionDetectionPolicy.type.modelProperties,
-      },
-    },
-  };
-
-export const ConditionalSkill: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Skills.Util.ConditionalSkill",
-  type: {
-    name: "Composite",
-    className: "ConditionalSkill",
-    uberParent: "SearchIndexerSkill",
-    polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...SearchIndexerSkill.type.modelProperties,
-    },
-  },
-};
-
-export const KeyPhraseExtractionSkill: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill",
-  type: {
-    name: "Composite",
-    className: "KeyPhraseExtractionSkill",
-    uberParent: "SearchIndexerSkill",
-    polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...SearchIndexerSkill.type.modelProperties,
-      defaultLanguageCode: {
-        serializedName: "defaultLanguageCode",
-        type: {
-          name: "String",
-        },
-      },
-      maxKeyPhraseCount: {
-        serializedName: "maxKeyPhraseCount",
-        nullable: true,
-        type: {
-          name: "Number",
-        },
-      },
-      modelVersion: {
-        serializedName: "modelVersion",
-        nullable: true,
-        type: {
-          name: "String",
-        },
-      },
-    },
-  },
-};
-
-export const OcrSkill: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Skills.Vision.OcrSkill",
-  type: {
-    name: "Composite",
-    className: "OcrSkill",
-    uberParent: "SearchIndexerSkill",
-    polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...SearchIndexerSkill.type.modelProperties,
-      defaultLanguageCode: {
-        serializedName: "defaultLanguageCode",
-        type: {
-          name: "String",
-        },
-      },
-      shouldDetectOrientation: {
-        defaultValue: false,
-        serializedName: "detectOrientation",
-        type: {
-          name: "Boolean",
-        },
-      },
-      lineEnding: {
-        serializedName: "lineEnding",
-        type: {
-          name: "String",
-        },
-      },
-    },
-  },
-};
-
-export const ImageAnalysisSkill: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Skills.Vision.ImageAnalysisSkill",
-  type: {
-    name: "Composite",
-    className: "ImageAnalysisSkill",
-    uberParent: "SearchIndexerSkill",
-    polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...SearchIndexerSkill.type.modelProperties,
-      defaultLanguageCode: {
-        serializedName: "defaultLanguageCode",
-        type: {
-          name: "String",
-        },
-      },
-      visualFeatures: {
-        serializedName: "visualFeatures",
-        type: {
-          name: "Sequence",
-          element: {
-            type: {
-              name: "String",
-            },
-          },
-        },
-      },
-      details: {
serializedName: "details", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const LanguageDetectionSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.LanguageDetectionSkill", - type: { - name: "Composite", - className: "LanguageDetectionSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultCountryHint: { - serializedName: "defaultCountryHint", - nullable: true, - type: { - name: "String", - }, - }, - modelVersion: { - serializedName: "modelVersion", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ShaperSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Util.ShaperSkill", - type: { - name: "Composite", - className: "ShaperSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - }, - }, -}; - -export const MergeSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.MergeSkill", - type: { - name: "Composite", - className: "MergeSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - insertPreTag: { - defaultValue: " ", - serializedName: "insertPreTag", - type: { - name: "String", - }, - }, - insertPostTag: { - defaultValue: " ", - serializedName: "insertPostTag", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const EntityRecognitionSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.EntityRecognitionSkill", - type: { - name: "Composite", - className: "EntityRecognitionSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - categories: { - serializedName: "categories", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - type: { - name: "String", - }, - }, - includeTypelessEntities: { - serializedName: "includeTypelessEntities", - nullable: true, - type: { - name: "Boolean", - }, - }, - minimumPrecision: { - serializedName: "minimumPrecision", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const SentimentSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.SentimentSkill", - type: { - name: "Composite", - className: "SentimentSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SentimentSkillV3: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.V3.SentimentSkill", - type: { - name: "Composite", - className: "SentimentSkillV3", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - nullable: 
true, - type: { - name: "String", - }, - }, - includeOpinionMining: { - defaultValue: false, - serializedName: "includeOpinionMining", - type: { - name: "Boolean", - }, - }, - modelVersion: { - serializedName: "modelVersion", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const EntityLinkingSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.V3.EntityLinkingSkill", - type: { - name: "Composite", - className: "EntityLinkingSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - nullable: true, - type: { - name: "String", - }, - }, - minimumPrecision: { - constraints: { - InclusiveMaximum: 1, - InclusiveMinimum: 0, - }, - serializedName: "minimumPrecision", - nullable: true, - type: { - name: "Number", - }, - }, - modelVersion: { - serializedName: "modelVersion", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const EntityRecognitionSkillV3: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.V3.EntityRecognitionSkill", - type: { - name: "Composite", - className: "EntityRecognitionSkillV3", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - categories: { - serializedName: "categories", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - nullable: true, - type: { - name: "String", - }, - }, - minimumPrecision: { - constraints: { - InclusiveMaximum: 1, - InclusiveMinimum: 0, - }, - serializedName: "minimumPrecision", - nullable: true, - type: { - name: "Number", - }, - }, - modelVersion: { - serializedName: "modelVersion", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const PIIDetectionSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.PIIDetectionSkill", - type: { - name: "Composite", - className: "PIIDetectionSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - nullable: true, - type: { - name: "String", - }, - }, - minimumPrecision: { - constraints: { - InclusiveMaximum: 1, - InclusiveMinimum: 0, - }, - serializedName: "minimumPrecision", - nullable: true, - type: { - name: "Number", - }, - }, - maskingMode: { - serializedName: "maskingMode", - type: { - name: "String", - }, - }, - maskingCharacter: { - constraints: { - MaxLength: 1, - }, - serializedName: "maskingCharacter", - nullable: true, - type: { - name: "String", - }, - }, - modelVersion: { - serializedName: "modelVersion", - nullable: true, - type: { - name: "String", - }, - }, - categories: { - serializedName: "piiCategories", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - domain: { - serializedName: "domain", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SplitSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.SplitSkill", - type: { - name: "Composite", - className: "SplitSkill", - uberParent: 
"SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - type: { - name: "String", - }, - }, - textSplitMode: { - serializedName: "textSplitMode", - type: { - name: "String", - }, - }, - maxPageLength: { - serializedName: "maximumPageLength", - nullable: true, - type: { - name: "Number", - }, - }, - pageOverlapLength: { - serializedName: "pageOverlapLength", - nullable: true, - type: { - name: "Number", - }, - }, - maximumPagesToTake: { - serializedName: "maximumPagesToTake", - nullable: true, - type: { - name: "Number", - }, - }, - unit: { - serializedName: "unit", - nullable: true, - type: { - name: "String", - }, - }, - azureOpenAITokenizerParameters: { - serializedName: "azureOpenAITokenizerParameters", - type: { - name: "Composite", - className: "AzureOpenAITokenizerParameters", - }, - }, - }, - }, -}; - -export const CustomEntityLookupSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.CustomEntityLookupSkill", - type: { - name: "Composite", - className: "CustomEntityLookupSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultLanguageCode: { - serializedName: "defaultLanguageCode", - nullable: true, - type: { - name: "String", - }, - }, - entitiesDefinitionUri: { - serializedName: "entitiesDefinitionUri", - nullable: true, - type: { - name: "String", - }, - }, - inlineEntitiesDefinition: { - serializedName: "inlineEntitiesDefinition", - nullable: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "CustomEntity", - }, - }, - }, - }, - globalDefaultCaseSensitive: { - serializedName: "globalDefaultCaseSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - globalDefaultAccentSensitive: { - serializedName: "globalDefaultAccentSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - globalDefaultFuzzyEditDistance: { - serializedName: "globalDefaultFuzzyEditDistance", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const TextTranslationSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.TranslationSkill", - type: { - name: "Composite", - className: "TextTranslationSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultToLanguageCode: { - serializedName: "defaultToLanguageCode", - required: true, - type: { - name: "String", - }, - }, - defaultFromLanguageCode: { - serializedName: "defaultFromLanguageCode", - type: { - name: "String", - }, - }, - suggestedFrom: { - serializedName: "suggestedFrom", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const DocumentExtractionSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Util.DocumentExtractionSkill", - type: { - name: "Composite", - className: "DocumentExtractionSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - parsingMode: { - serializedName: "parsingMode", - nullable: true, - type: { - name: "String", - }, - }, - dataToExtract: { - 
serializedName: "dataToExtract", - nullable: true, - type: { - name: "String", - }, - }, - configuration: { - serializedName: "configuration", - nullable: true, - type: { - name: "Dictionary", - value: { type: { name: "any" } }, - }, - }, - }, - }, -}; - -export const DocumentIntelligenceLayoutSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill", - type: { - name: "Composite", - className: "DocumentIntelligenceLayoutSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - outputFormat: { - defaultValue: "markdown", - serializedName: "outputFormat", - nullable: true, - type: { - name: "String", - }, - }, - outputMode: { - defaultValue: "oneToMany", - serializedName: "outputMode", - nullable: true, - type: { - name: "String", - }, - }, - markdownHeaderDepth: { - defaultValue: "h6", - serializedName: "markdownHeaderDepth", - nullable: true, - type: { - name: "String", - }, - }, - extractionOptions: { - serializedName: "extractionOptions", - nullable: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - chunkingProperties: { - serializedName: "chunkingProperties", - type: { - name: "Composite", - className: "DocumentIntelligenceLayoutSkillChunkingProperties", - }, - }, - }, - }, -}; - -export const WebApiSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Custom.WebApiSkill", - type: { - name: "Composite", - className: "WebApiSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - uri: { - serializedName: "uri", - required: true, - type: { - name: "String", - }, - }, - httpHeaders: { - serializedName: "httpHeaders", - type: { - name: "Dictionary", - value: { type: { name: "String" } }, - }, - }, - httpMethod: { - serializedName: "httpMethod", - type: { - name: "String", - }, - }, - timeout: { - serializedName: "timeout", - type: { - name: "TimeSpan", - }, - }, - batchSize: { - serializedName: "batchSize", - nullable: true, - type: { - name: "Number", - }, - }, - degreeOfParallelism: { - serializedName: "degreeOfParallelism", - nullable: true, - type: { - name: "Number", - }, - }, - authResourceId: { - serializedName: "authResourceId", - nullable: true, - type: { - name: "String", - }, - }, - authIdentity: { - serializedName: "authIdentity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - }, - }, -}; - -export const ContentUnderstandingSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Util.ContentUnderstandingSkill", - type: { - name: "Composite", - className: "ContentUnderstandingSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - extractionOptions: { - serializedName: "extractionOptions", - nullable: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - chunkingProperties: { - serializedName: "chunkingProperties", - type: { - name: "Composite", - className: "ContentUnderstandingSkillChunkingProperties", - }, - }, - }, - }, -}; - -export const AzureMachineLearningSkill: coreClient.CompositeMapper = { - serializedName: 
"#Microsoft.Skills.Custom.AmlSkill", - type: { - name: "Composite", - className: "AzureMachineLearningSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - scoringUri: { - serializedName: "uri", - nullable: true, - type: { - name: "String", - }, - }, - authenticationKey: { - serializedName: "key", - nullable: true, - type: { - name: "String", - }, - }, - resourceId: { - serializedName: "resourceId", - nullable: true, - type: { - name: "String", - }, - }, - timeout: { - serializedName: "timeout", - nullable: true, - type: { - name: "TimeSpan", - }, - }, - region: { - serializedName: "region", - nullable: true, - type: { - name: "String", - }, - }, - degreeOfParallelism: { - serializedName: "degreeOfParallelism", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const AzureOpenAIEmbeddingSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill", - type: { - name: "Composite", - className: "AzureOpenAIEmbeddingSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - ...AzureOpenAIParameters.type.modelProperties, - dimensions: { - serializedName: "dimensions", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const VisionVectorizeSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Vision.VectorizeSkill", - type: { - name: "Composite", - className: "VisionVectorizeSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - modelVersion: { - serializedName: "modelVersion", - required: true, - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const DefaultCognitiveServicesAccount: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.DefaultCognitiveServices", - type: { - name: "Composite", - className: "DefaultCognitiveServicesAccount", - uberParent: "CognitiveServicesAccount", - polymorphicDiscriminator: - CognitiveServicesAccount.type.polymorphicDiscriminator, - modelProperties: { - ...CognitiveServicesAccount.type.modelProperties, - }, - }, -}; - -export const CognitiveServicesAccountKey: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.CognitiveServicesByKey", - type: { - name: "Composite", - className: "CognitiveServicesAccountKey", - uberParent: "CognitiveServicesAccount", - polymorphicDiscriminator: - CognitiveServicesAccount.type.polymorphicDiscriminator, - modelProperties: { - ...CognitiveServicesAccount.type.modelProperties, - key: { - serializedName: "key", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const AIServicesAccountKey: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.AIServicesByKey", - type: { - name: "Composite", - className: "AIServicesAccountKey", - uberParent: "CognitiveServicesAccount", - polymorphicDiscriminator: - CognitiveServicesAccount.type.polymorphicDiscriminator, - modelProperties: { - ...CognitiveServicesAccount.type.modelProperties, - key: { - serializedName: "key", - required: true, - type: { - name: "String", - }, - }, - subdomainUrl: { - serializedName: "subdomainUrl", - required: true, - type: { - 
name: "String", - }, - }, - }, - }, -}; - -export const AIServicesAccountIdentity: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.AIServicesByIdentity", - type: { - name: "Composite", - className: "AIServicesAccountIdentity", - uberParent: "CognitiveServicesAccount", - polymorphicDiscriminator: - CognitiveServicesAccount.type.polymorphicDiscriminator, - modelProperties: { - ...CognitiveServicesAccount.type.modelProperties, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - subdomainUrl: { - serializedName: "subdomainUrl", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SearchIndexerKnowledgeStoreTableProjectionSelector: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreTableProjectionSelector", - modelProperties: { - ...SearchIndexerKnowledgeStoreProjectionSelector.type.modelProperties, - tableName: { - serializedName: "tableName", - required: true, - type: { - name: "String", - }, - }, - }, - }, - }; - -export const SearchIndexerKnowledgeStoreBlobProjectionSelector: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreBlobProjectionSelector", - modelProperties: { - ...SearchIndexerKnowledgeStoreProjectionSelector.type.modelProperties, - storageContainer: { - serializedName: "storageContainer", - required: true, - type: { - name: "String", - }, - }, - }, - }, - }; - -export const DistanceScoringFunction: coreClient.CompositeMapper = { - serializedName: "distance", - type: { - name: "Composite", - className: "DistanceScoringFunction", - uberParent: "ScoringFunction", - polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, - modelProperties: { - ...ScoringFunction.type.modelProperties, - parameters: { - serializedName: "distance", - type: { - name: "Composite", - className: "DistanceScoringParameters", - }, - }, - }, - }, -}; - -export const FreshnessScoringFunction: coreClient.CompositeMapper = { - serializedName: "freshness", - type: { - name: "Composite", - className: "FreshnessScoringFunction", - uberParent: "ScoringFunction", - polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, - modelProperties: { - ...ScoringFunction.type.modelProperties, - parameters: { - serializedName: "freshness", - type: { - name: "Composite", - className: "FreshnessScoringParameters", - }, - }, - }, - }, -}; - -export const MagnitudeScoringFunction: coreClient.CompositeMapper = { - serializedName: "magnitude", - type: { - name: "Composite", - className: "MagnitudeScoringFunction", - uberParent: "ScoringFunction", - polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, - modelProperties: { - ...ScoringFunction.type.modelProperties, - parameters: { - serializedName: "magnitude", - type: { - name: "Composite", - className: "MagnitudeScoringParameters", - }, - }, - }, - }, -}; - -export const TagScoringFunction: coreClient.CompositeMapper = { - serializedName: "tag", - type: { - name: "Composite", - className: "TagScoringFunction", - uberParent: "ScoringFunction", - polymorphicDiscriminator: ScoringFunction.type.polymorphicDiscriminator, - modelProperties: { - ...ScoringFunction.type.modelProperties, - parameters: { - serializedName: "tag", - type: { - name: "Composite", - className: "TagScoringParameters", - }, - }, - }, - }, -}; - -export const CustomAnalyzer: coreClient.CompositeMapper = { - serializedName: 
"#Microsoft.Azure.Search.CustomAnalyzer", - type: { - name: "Composite", - className: "CustomAnalyzer", - uberParent: "LexicalAnalyzer", - polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalAnalyzer.type.modelProperties, - tokenizerName: { - serializedName: "tokenizer", - required: true, - type: { - name: "String", - }, - }, - tokenFilters: { - serializedName: "tokenFilters", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - charFilters: { - serializedName: "charFilters", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const PatternAnalyzer: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PatternAnalyzer", - type: { - name: "Composite", - className: "PatternAnalyzer", - uberParent: "LexicalAnalyzer", - polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalAnalyzer.type.modelProperties, - lowerCaseTerms: { - defaultValue: true, - serializedName: "lowercase", - type: { - name: "Boolean", - }, - }, - pattern: { - defaultValue: "\W+", - serializedName: "pattern", - type: { - name: "String", - }, - }, - flags: { - serializedName: "flags", - type: { - name: "String", - }, - }, - stopwords: { - serializedName: "stopwords", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const LuceneStandardAnalyzer: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StandardAnalyzer", - type: { - name: "Composite", - className: "LuceneStandardAnalyzer", - uberParent: "LexicalAnalyzer", - polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalAnalyzer.type.modelProperties, - maxTokenLength: { - defaultValue: 255, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "maxTokenLength", - type: { - name: "Number", - }, - }, - stopwords: { - serializedName: "stopwords", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const StopAnalyzer: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StopAnalyzer", - type: { - name: "Composite", - className: "StopAnalyzer", - uberParent: "LexicalAnalyzer", - polymorphicDiscriminator: LexicalAnalyzer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalAnalyzer.type.modelProperties, - stopwords: { - serializedName: "stopwords", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const ClassicTokenizer: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.ClassicTokenizer", - type: { - name: "Composite", - className: "ClassicTokenizer", - uberParent: "LexicalTokenizer", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - defaultValue: 255, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "maxTokenLength", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const EdgeNGramTokenizer: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.EdgeNGramTokenizer", - type: { - name: "Composite", - className: "EdgeNGramTokenizer", - uberParent: "LexicalTokenizer", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - modelProperties: { - 
-      ...LexicalTokenizer.type.modelProperties,
-      minGram: {
-        defaultValue: 1,
-        constraints: {
-          InclusiveMaximum: 300,
-        },
-        serializedName: "minGram",
-        type: {
-          name: "Number",
-        },
-      },
-      maxGram: {
-        defaultValue: 2,
-        constraints: {
-          InclusiveMaximum: 300,
-        },
-        serializedName: "maxGram",
-        type: {
-          name: "Number",
-        },
-      },
-      tokenChars: {
-        serializedName: "tokenChars",
-        type: {
-          name: "Sequence",
-          element: {
-            type: {
-              name: "Enum",
-              allowedValues: [
-                "letter",
-                "digit",
-                "whitespace",
-                "punctuation",
-                "symbol",
-              ],
-            },
-          },
-        },
-      },
-    },
-  },
-};
-
-export const KeywordTokenizer: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.KeywordTokenizer",
-  type: {
-    name: "Composite",
-    className: "KeywordTokenizer",
-    uberParent: "LexicalTokenizer",
-    polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...LexicalTokenizer.type.modelProperties,
-      bufferSize: {
-        defaultValue: 256,
-        serializedName: "bufferSize",
-        type: {
-          name: "Number",
-        },
-      },
-    },
-  },
-};
-
-export const KeywordTokenizerV2: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.KeywordTokenizerV2",
-  type: {
-    name: "Composite",
-    className: "KeywordTokenizerV2",
-    uberParent: "LexicalTokenizer",
-    polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...LexicalTokenizer.type.modelProperties,
-      maxTokenLength: {
-        defaultValue: 256,
-        constraints: {
-          InclusiveMaximum: 300,
-        },
-        serializedName: "maxTokenLength",
-        type: {
-          name: "Number",
-        },
-      },
-    },
-  },
-};
-
-export const MicrosoftLanguageTokenizer: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer",
-  type: {
-    name: "Composite",
-    className: "MicrosoftLanguageTokenizer",
-    uberParent: "LexicalTokenizer",
-    polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...LexicalTokenizer.type.modelProperties,
-      maxTokenLength: {
-        defaultValue: 255,
-        constraints: {
-          InclusiveMaximum: 300,
-        },
-        serializedName: "maxTokenLength",
-        type: {
-          name: "Number",
-        },
-      },
-      isSearchTokenizer: {
-        defaultValue: false,
-        serializedName: "isSearchTokenizer",
-        type: {
-          name: "Boolean",
-        },
-      },
-      language: {
-        serializedName: "language",
-        type: {
-          name: "Enum",
-          allowedValues: [
-            "bangla",
-            "bulgarian",
-            "catalan",
-            "chineseSimplified",
-            "chineseTraditional",
-            "croatian",
-            "czech",
-            "danish",
-            "dutch",
-            "english",
-            "french",
-            "german",
-            "greek",
-            "gujarati",
-            "hindi",
-            "icelandic",
-            "indonesian",
-            "italian",
-            "japanese",
-            "kannada",
-            "korean",
-            "malay",
-            "malayalam",
-            "marathi",
-            "norwegianBokmaal",
-            "polish",
-            "portuguese",
-            "portugueseBrazilian",
-            "punjabi",
-            "romanian",
-            "russian",
-            "serbianCyrillic",
-            "serbianLatin",
-            "slovenian",
-            "spanish",
-            "swedish",
-            "tamil",
-            "telugu",
-            "thai",
-            "ukrainian",
-            "urdu",
-            "vietnamese",
-          ],
-        },
-      },
-    },
-  },
-};
-
-export const MicrosoftLanguageStemmingTokenizer: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer",
-  type: {
-    name: "Composite",
-    className: "MicrosoftLanguageStemmingTokenizer",
-    uberParent: "LexicalTokenizer",
-    polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...LexicalTokenizer.type.modelProperties,
-      maxTokenLength: {
-        defaultValue: 255,
-        constraints: {
-          InclusiveMaximum: 300,
-        },
-        serializedName: "maxTokenLength",
-        type: {
-          name: "Number",
-        },
-      },
-      isSearchTokenizer: {
-        defaultValue: false,
-        serializedName: "isSearchTokenizer",
-        type: {
-          name: "Boolean",
-        },
-      },
-      language: {
-        serializedName: "language",
-        type: {
-          name: "Enum",
-          allowedValues: [
-            "arabic",
-            "bangla",
-            "bulgarian",
-            "catalan",
-            "croatian",
-            "czech",
-            "danish",
-            "dutch",
-            "english",
-            "estonian",
-            "finnish",
-            "french",
-            "german",
-            "greek",
-            "gujarati",
-            "hebrew",
-            "hindi",
-            "hungarian",
-            "icelandic",
-            "indonesian",
-            "italian",
-            "kannada",
-            "latvian",
-            "lithuanian",
-            "malay",
-            "malayalam",
-            "marathi",
-            "norwegianBokmaal",
-            "polish",
-            "portuguese",
-            "portugueseBrazilian",
-            "punjabi",
-            "romanian",
-            "russian",
-            "serbianCyrillic",
-            "serbianLatin",
-            "slovak",
-            "slovenian",
-            "spanish",
-            "swedish",
-            "tamil",
-            "telugu",
-            "turkish",
-            "ukrainian",
-            "urdu",
-          ],
-        },
-      },
-    },
-  },
-};
-
-export const NGramTokenizer: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.NGramTokenizer",
-  type: {
-    name: "Composite",
-    className: "NGramTokenizer",
-    uberParent: "LexicalTokenizer",
-    polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...LexicalTokenizer.type.modelProperties,
-      minGram: {
-        defaultValue: 1,
-        constraints: {
-          InclusiveMaximum: 300,
-        },
-        serializedName: "minGram",
-        type: {
-          name: "Number",
-        },
-      },
-      maxGram: {
-        defaultValue: 2,
-        constraints: {
-          InclusiveMaximum: 300,
-        },
-        serializedName: "maxGram",
-        type: {
-          name: "Number",
-        },
-      },
-      tokenChars: {
-        serializedName: "tokenChars",
-        type: {
-          name: "Sequence",
-          element: {
-            type: {
-              name: "Enum",
-              allowedValues: [
-                "letter",
-                "digit",
-                "whitespace",
-                "punctuation",
-                "symbol",
-              ],
-            },
-          },
-        },
-      },
-    },
-  },
-};
-
-export const PathHierarchyTokenizerV2: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2",
-  type: {
-    name: "Composite",
-    className: "PathHierarchyTokenizerV2",
-    uberParent: "LexicalTokenizer",
-    polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...LexicalTokenizer.type.modelProperties,
-      delimiter: {
-        defaultValue: "/",
-        serializedName: "delimiter",
-        type: {
-          name: "String",
-        },
-      },
-      replacement: {
-        defaultValue: "/",
-        serializedName: "replacement",
-        type: {
-          name: "String",
-        },
-      },
-      maxTokenLength: {
-        defaultValue: 300,
-        constraints: {
-          InclusiveMaximum: 300,
-        },
-        serializedName: "maxTokenLength",
-        type: {
-          name: "Number",
-        },
-      },
-      reverseTokenOrder: {
-        defaultValue: false,
-        serializedName: "reverse",
-        type: {
-          name: "Boolean",
-        },
-      },
-      numberOfTokensToSkip: {
-        defaultValue: 0,
-        serializedName: "skip",
-        type: {
-          name: "Number",
-        },
-      },
-    },
-  },
-};
-
-export const PatternTokenizer: coreClient.CompositeMapper = {
-  serializedName: "#Microsoft.Azure.Search.PatternTokenizer",
-  type: {
-    name: "Composite",
-    className: "PatternTokenizer",
-    uberParent: "LexicalTokenizer",
-    polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator,
-    modelProperties: {
-      ...LexicalTokenizer.type.modelProperties,
-      pattern: {
-        defaultValue: "\\W+",
-        serializedName: "pattern",
-        type: {
-          name: "String",
-        },
-      },
-      flags: {
-        serializedName: "flags",
-        type: {
-          name: "String",
-        },
-      },
-      group: {
-        defaultValue: -1,
-        serializedName: "group",
-        type: {
-          name: "Number",
-        },
-      },
-    },
-  },
-};
-
-export const LuceneStandardTokenizer: coreClient.CompositeMapper = {
"#Microsoft.Azure.Search.StandardTokenizer", - type: { - name: "Composite", - className: "LuceneStandardTokenizer", - uberParent: "LexicalTokenizer", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - defaultValue: 255, - serializedName: "maxTokenLength", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const LuceneStandardTokenizerV2: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StandardTokenizerV2", - type: { - name: "Composite", - className: "LuceneStandardTokenizerV2", - uberParent: "LexicalTokenizer", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - defaultValue: 255, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "maxTokenLength", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const UaxUrlEmailTokenizer: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer", - type: { - name: "Composite", - className: "UaxUrlEmailTokenizer", - uberParent: "LexicalTokenizer", - polymorphicDiscriminator: LexicalTokenizer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalTokenizer.type.modelProperties, - maxTokenLength: { - defaultValue: 255, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "maxTokenLength", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const AsciiFoldingTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter", - type: { - name: "Composite", - className: "AsciiFoldingTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - preserveOriginal: { - defaultValue: false, - serializedName: "preserveOriginal", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const CjkBigramTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.CjkBigramTokenFilter", - type: { - name: "Composite", - className: "CjkBigramTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - ignoreScripts: { - serializedName: "ignoreScripts", - type: { - name: "Sequence", - element: { - type: { - name: "Enum", - allowedValues: ["han", "hiragana", "katakana", "hangul"], - }, - }, - }, - }, - outputUnigrams: { - defaultValue: false, - serializedName: "outputUnigrams", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const CommonGramTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.CommonGramTokenFilter", - type: { - name: "Composite", - className: "CommonGramTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - commonWords: { - serializedName: "commonWords", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - ignoreCase: { - defaultValue: false, - serializedName: "ignoreCase", - type: { - name: "Boolean", - }, - }, - useQueryMode: { - defaultValue: false, - serializedName: "queryMode", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const DictionaryDecompounderTokenFilter: 
coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter", - type: { - name: "Composite", - className: "DictionaryDecompounderTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - wordList: { - serializedName: "wordList", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - minWordSize: { - defaultValue: 5, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "minWordSize", - type: { - name: "Number", - }, - }, - minSubwordSize: { - defaultValue: 2, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "minSubwordSize", - type: { - name: "Number", - }, - }, - maxSubwordSize: { - defaultValue: 15, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "maxSubwordSize", - type: { - name: "Number", - }, - }, - onlyLongestMatch: { - defaultValue: false, - serializedName: "onlyLongestMatch", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const EdgeNGramTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.EdgeNGramTokenFilter", - type: { - name: "Composite", - className: "EdgeNGramTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - minGram: { - defaultValue: 1, - serializedName: "minGram", - type: { - name: "Number", - }, - }, - maxGram: { - defaultValue: 2, - serializedName: "maxGram", - type: { - name: "Number", - }, - }, - side: { - serializedName: "side", - type: { - name: "Enum", - allowedValues: ["front", "back"], - }, - }, - }, - }, -}; - -export const EdgeNGramTokenFilterV2: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2", - type: { - name: "Composite", - className: "EdgeNGramTokenFilterV2", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - minGram: { - defaultValue: 1, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "minGram", - type: { - name: "Number", - }, - }, - maxGram: { - defaultValue: 2, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "maxGram", - type: { - name: "Number", - }, - }, - side: { - serializedName: "side", - type: { - name: "Enum", - allowedValues: ["front", "back"], - }, - }, - }, - }, -}; - -export const ElisionTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.ElisionTokenFilter", - type: { - name: "Composite", - className: "ElisionTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - articles: { - serializedName: "articles", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const KeepTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.KeepTokenFilter", - type: { - name: "Composite", - className: "KeepTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - keepWords: { - serializedName: "keepWords", - required: true, - type: { - name: "Sequence", - element: { - type: { - 
name: "String", - }, - }, - }, - }, - lowerCaseKeepWords: { - defaultValue: false, - serializedName: "keepWordsCase", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const KeywordMarkerTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter", - type: { - name: "Composite", - className: "KeywordMarkerTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - keywords: { - serializedName: "keywords", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - ignoreCase: { - defaultValue: false, - serializedName: "ignoreCase", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const LengthTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.LengthTokenFilter", - type: { - name: "Composite", - className: "LengthTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - minLength: { - defaultValue: 0, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "min", - type: { - name: "Number", - }, - }, - maxLength: { - defaultValue: 300, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "max", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const LimitTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.LimitTokenFilter", - type: { - name: "Composite", - className: "LimitTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - maxTokenCount: { - defaultValue: 1, - serializedName: "maxTokenCount", - type: { - name: "Number", - }, - }, - consumeAllTokens: { - defaultValue: false, - serializedName: "consumeAllTokens", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const NGramTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.NGramTokenFilter", - type: { - name: "Composite", - className: "NGramTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - minGram: { - defaultValue: 1, - serializedName: "minGram", - type: { - name: "Number", - }, - }, - maxGram: { - defaultValue: 2, - serializedName: "maxGram", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const NGramTokenFilterV2: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.NGramTokenFilterV2", - type: { - name: "Composite", - className: "NGramTokenFilterV2", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - minGram: { - defaultValue: 1, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "minGram", - type: { - name: "Number", - }, - }, - maxGram: { - defaultValue: 2, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "maxGram", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const PatternCaptureTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PatternCaptureTokenFilter", - type: { - name: "Composite", - className: "PatternCaptureTokenFilter", - uberParent: "TokenFilter", - 
polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - patterns: { - serializedName: "patterns", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - preserveOriginal: { - defaultValue: true, - serializedName: "preserveOriginal", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const PatternReplaceTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PatternReplaceTokenFilter", - type: { - name: "Composite", - className: "PatternReplaceTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - pattern: { - serializedName: "pattern", - required: true, - type: { - name: "String", - }, - }, - replacement: { - serializedName: "replacement", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const PhoneticTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PhoneticTokenFilter", - type: { - name: "Composite", - className: "PhoneticTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - encoder: { - serializedName: "encoder", - type: { - name: "Enum", - allowedValues: [ - "metaphone", - "doubleMetaphone", - "soundex", - "refinedSoundex", - "caverphone1", - "caverphone2", - "cologne", - "nysiis", - "koelnerPhonetik", - "haasePhonetik", - "beiderMorse", - ], - }, - }, - replaceOriginalTokens: { - defaultValue: true, - serializedName: "replace", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const ShingleTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.ShingleTokenFilter", - type: { - name: "Composite", - className: "ShingleTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - maxShingleSize: { - defaultValue: 2, - constraints: { - InclusiveMinimum: 2, - }, - serializedName: "maxShingleSize", - type: { - name: "Number", - }, - }, - minShingleSize: { - defaultValue: 2, - constraints: { - InclusiveMinimum: 2, - }, - serializedName: "minShingleSize", - type: { - name: "Number", - }, - }, - outputUnigrams: { - defaultValue: true, - serializedName: "outputUnigrams", - type: { - name: "Boolean", - }, - }, - outputUnigramsIfNoShingles: { - defaultValue: false, - serializedName: "outputUnigramsIfNoShingles", - type: { - name: "Boolean", - }, - }, - tokenSeparator: { - defaultValue: " ", - serializedName: "tokenSeparator", - type: { - name: "String", - }, - }, - filterToken: { - defaultValue: "_", - serializedName: "filterToken", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const SnowballTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.SnowballTokenFilter", - type: { - name: "Composite", - className: "SnowballTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - language: { - serializedName: "language", - required: true, - type: { - name: "Enum", - allowedValues: [ - "armenian", - "basque", - "catalan", - "danish", - "dutch", - "english", - "finnish", - "french", - "german", - "german2", - 
"hungarian", - "italian", - "kp", - "lovins", - "norwegian", - "porter", - "portuguese", - "romanian", - "russian", - "spanish", - "swedish", - "turkish", - ], - }, - }, - }, - }, -}; - -export const StemmerTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StemmerTokenFilter", - type: { - name: "Composite", - className: "StemmerTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - language: { - serializedName: "language", - required: true, - type: { - name: "Enum", - allowedValues: [ - "arabic", - "armenian", - "basque", - "brazilian", - "bulgarian", - "catalan", - "czech", - "danish", - "dutch", - "dutchKp", - "english", - "lightEnglish", - "minimalEnglish", - "possessiveEnglish", - "porter2", - "lovins", - "finnish", - "lightFinnish", - "french", - "lightFrench", - "minimalFrench", - "galician", - "minimalGalician", - "german", - "german2", - "lightGerman", - "minimalGerman", - "greek", - "hindi", - "hungarian", - "lightHungarian", - "indonesian", - "irish", - "italian", - "lightItalian", - "sorani", - "latvian", - "norwegian", - "lightNorwegian", - "minimalNorwegian", - "lightNynorsk", - "minimalNynorsk", - "portuguese", - "lightPortuguese", - "minimalPortuguese", - "portugueseRslp", - "romanian", - "russian", - "lightRussian", - "spanish", - "lightSpanish", - "swedish", - "lightSwedish", - "turkish", - ], - }, - }, - }, - }, -}; - -export const StemmerOverrideTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter", - type: { - name: "Composite", - className: "StemmerOverrideTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - rules: { - serializedName: "rules", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const StopwordsTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.StopwordsTokenFilter", - type: { - name: "Composite", - className: "StopwordsTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - stopwords: { - serializedName: "stopwords", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - stopwordsList: { - serializedName: "stopwordsList", - type: { - name: "Enum", - allowedValues: [ - "arabic", - "armenian", - "basque", - "brazilian", - "bulgarian", - "catalan", - "czech", - "danish", - "dutch", - "english", - "finnish", - "french", - "galician", - "german", - "greek", - "hindi", - "hungarian", - "indonesian", - "irish", - "italian", - "latvian", - "norwegian", - "persian", - "portuguese", - "romanian", - "russian", - "sorani", - "spanish", - "swedish", - "thai", - "turkish", - ], - }, - }, - ignoreCase: { - defaultValue: false, - serializedName: "ignoreCase", - type: { - name: "Boolean", - }, - }, - removeTrailingStopWords: { - defaultValue: true, - serializedName: "removeTrailing", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const SynonymTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.SynonymTokenFilter", - type: { - name: "Composite", - className: "SynonymTokenFilter", - uberParent: "TokenFilter", - 
polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - synonyms: { - serializedName: "synonyms", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - ignoreCase: { - defaultValue: false, - serializedName: "ignoreCase", - type: { - name: "Boolean", - }, - }, - expand: { - defaultValue: true, - serializedName: "expand", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const TruncateTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.TruncateTokenFilter", - type: { - name: "Composite", - className: "TruncateTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - length: { - defaultValue: 300, - constraints: { - InclusiveMaximum: 300, - }, - serializedName: "length", - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const UniqueTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.UniqueTokenFilter", - type: { - name: "Composite", - className: "UniqueTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - onlyOnSamePosition: { - defaultValue: false, - serializedName: "onlyOnSamePosition", - type: { - name: "Boolean", - }, - }, - }, - }, -}; - -export const WordDelimiterTokenFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.WordDelimiterTokenFilter", - type: { - name: "Composite", - className: "WordDelimiterTokenFilter", - uberParent: "TokenFilter", - polymorphicDiscriminator: TokenFilter.type.polymorphicDiscriminator, - modelProperties: { - ...TokenFilter.type.modelProperties, - generateWordParts: { - defaultValue: true, - serializedName: "generateWordParts", - type: { - name: "Boolean", - }, - }, - generateNumberParts: { - defaultValue: true, - serializedName: "generateNumberParts", - type: { - name: "Boolean", - }, - }, - catenateWords: { - defaultValue: false, - serializedName: "catenateWords", - type: { - name: "Boolean", - }, - }, - catenateNumbers: { - defaultValue: false, - serializedName: "catenateNumbers", - type: { - name: "Boolean", - }, - }, - catenateAll: { - defaultValue: false, - serializedName: "catenateAll", - type: { - name: "Boolean", - }, - }, - splitOnCaseChange: { - defaultValue: true, - serializedName: "splitOnCaseChange", - type: { - name: "Boolean", - }, - }, - preserveOriginal: { - defaultValue: false, - serializedName: "preserveOriginal", - type: { - name: "Boolean", - }, - }, - splitOnNumerics: { - defaultValue: true, - serializedName: "splitOnNumerics", - type: { - name: "Boolean", - }, - }, - stemEnglishPossessive: { - defaultValue: true, - serializedName: "stemEnglishPossessive", - type: { - name: "Boolean", - }, - }, - protectedWords: { - serializedName: "protectedWords", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const MappingCharFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.MappingCharFilter", - type: { - name: "Composite", - className: "MappingCharFilter", - uberParent: "CharFilter", - polymorphicDiscriminator: CharFilter.type.polymorphicDiscriminator, - modelProperties: { - ...CharFilter.type.modelProperties, - mappings: { - serializedName: "mappings", - 
required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const PatternReplaceCharFilter: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.PatternReplaceCharFilter", - type: { - name: "Composite", - className: "PatternReplaceCharFilter", - uberParent: "CharFilter", - polymorphicDiscriminator: CharFilter.type.polymorphicDiscriminator, - modelProperties: { - ...CharFilter.type.modelProperties, - pattern: { - serializedName: "pattern", - required: true, - type: { - name: "String", - }, - }, - replacement: { - serializedName: "replacement", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const CustomNormalizer: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.CustomNormalizer", - type: { - name: "Composite", - className: "CustomNormalizer", - uberParent: "LexicalNormalizer", - polymorphicDiscriminator: LexicalNormalizer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalNormalizer.type.modelProperties, - tokenFilters: { - serializedName: "tokenFilters", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - charFilters: { - serializedName: "charFilters", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const ClassicSimilarity: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.ClassicSimilarity", - type: { - name: "Composite", - className: "ClassicSimilarity", - uberParent: "Similarity", - polymorphicDiscriminator: Similarity.type.polymorphicDiscriminator, - modelProperties: { - ...Similarity.type.modelProperties, - }, - }, -}; - -export const BM25Similarity: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.BM25Similarity", - type: { - name: "Composite", - className: "BM25Similarity", - uberParent: "Similarity", - polymorphicDiscriminator: Similarity.type.polymorphicDiscriminator, - modelProperties: { - ...Similarity.type.modelProperties, - k1: { - serializedName: "k1", - nullable: true, - type: { - name: "Number", - }, - }, - b: { - serializedName: "b", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const HnswAlgorithmConfiguration: coreClient.CompositeMapper = { - serializedName: "hnsw", - type: { - name: "Composite", - className: "HnswAlgorithmConfiguration", - uberParent: "VectorSearchAlgorithmConfiguration", - polymorphicDiscriminator: - VectorSearchAlgorithmConfiguration.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchAlgorithmConfiguration.type.modelProperties, - parameters: { - serializedName: "hnswParameters", - type: { - name: "Composite", - className: "HnswParameters", - }, - }, - }, - }, -}; - -export const ExhaustiveKnnAlgorithmConfiguration: coreClient.CompositeMapper = { - serializedName: "exhaustiveKnn", - type: { - name: "Composite", - className: "ExhaustiveKnnAlgorithmConfiguration", - uberParent: "VectorSearchAlgorithmConfiguration", - polymorphicDiscriminator: - VectorSearchAlgorithmConfiguration.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchAlgorithmConfiguration.type.modelProperties, - parameters: { - serializedName: "exhaustiveKnnParameters", - type: { - name: "Composite", - className: "ExhaustiveKnnParameters", - }, - }, - }, - }, -}; - -export const AzureOpenAIVectorizer: coreClient.CompositeMapper = { - serializedName: "azureOpenAI", - type: { - name: "Composite", - className: 
"AzureOpenAIVectorizer", - uberParent: "VectorSearchVectorizer", - polymorphicDiscriminator: - VectorSearchVectorizer.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchVectorizer.type.modelProperties, - parameters: { - serializedName: "azureOpenAIParameters", - type: { - name: "Composite", - className: "AzureOpenAIParameters", - }, - }, - }, - }, -}; - -export const WebApiVectorizer: coreClient.CompositeMapper = { - serializedName: "customWebApi", - type: { - name: "Composite", - className: "WebApiVectorizer", - uberParent: "VectorSearchVectorizer", - polymorphicDiscriminator: - VectorSearchVectorizer.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchVectorizer.type.modelProperties, - parameters: { - serializedName: "customWebApiParameters", - type: { - name: "Composite", - className: "WebApiParameters", - }, - }, - }, - }, -}; - -export const AIServicesVisionVectorizer: coreClient.CompositeMapper = { - serializedName: "aiServicesVision", - type: { - name: "Composite", - className: "AIServicesVisionVectorizer", - uberParent: "VectorSearchVectorizer", - polymorphicDiscriminator: - VectorSearchVectorizer.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchVectorizer.type.modelProperties, - aIServicesVisionParameters: { - serializedName: "aiServicesVisionParameters", - type: { - name: "Composite", - className: "AIServicesVisionParameters", - }, - }, - }, - }, -}; - -export const AMLVectorizer: coreClient.CompositeMapper = { - serializedName: "aml", - type: { - name: "Composite", - className: "AMLVectorizer", - uberParent: "VectorSearchVectorizer", - polymorphicDiscriminator: - VectorSearchVectorizer.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchVectorizer.type.modelProperties, - aMLParameters: { - serializedName: "amlParameters", - type: { - name: "Composite", - className: "AMLParameters", - }, - }, - }, - }, -}; - -export const ScalarQuantizationCompression: coreClient.CompositeMapper = { - serializedName: "scalarQuantization", - type: { - name: "Composite", - className: "ScalarQuantizationCompression", - uberParent: "VectorSearchCompression", - polymorphicDiscriminator: - VectorSearchCompression.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchCompression.type.modelProperties, - parameters: { - serializedName: "scalarQuantizationParameters", - type: { - name: "Composite", - className: "ScalarQuantizationParameters", - }, - }, - }, - }, -}; - -export const BinaryQuantizationCompression: coreClient.CompositeMapper = { - serializedName: "binaryQuantization", - type: { - name: "Composite", - className: "BinaryQuantizationCompression", - uberParent: "VectorSearchCompression", - polymorphicDiscriminator: - VectorSearchCompression.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchCompression.type.modelProperties, - }, - }, -}; - -export const KnowledgeSourceAzureOpenAIVectorizer: coreClient.CompositeMapper = - { - serializedName: "azureOpenAI", - type: { - name: "Composite", - className: "KnowledgeSourceAzureOpenAIVectorizer", - uberParent: "KnowledgeSourceVectorizer", - polymorphicDiscriminator: - KnowledgeSourceVectorizer.type.polymorphicDiscriminator, - modelProperties: { - ...KnowledgeSourceVectorizer.type.modelProperties, - azureOpenAIParameters: { - serializedName: "azureOpenAIParameters", - type: { - name: "Composite", - className: "AzureOpenAIParameters", - }, - }, - }, - }, - }; - -export const ChatCompletionSkill: coreClient.CompositeMapper = { - serializedName: 
"#Microsoft.Skills.Custom.ChatCompletionSkill", - type: { - name: "Composite", - className: "ChatCompletionSkill", - uberParent: "WebApiSkill", - polymorphicDiscriminator: WebApiSkill.type.polymorphicDiscriminator, - modelProperties: { - ...WebApiSkill.type.modelProperties, - apiKey: { - serializedName: "apiKey", - type: { - name: "String", - }, - }, - commonModelParameters: { - serializedName: "commonModelParameters", - type: { - name: "Composite", - className: "CommonModelParameters", - }, - }, - extraParameters: { - serializedName: "extraParameters", - nullable: true, - type: { - name: "Dictionary", - value: { type: { name: "any" } }, - }, - }, - extraParametersBehavior: { - defaultValue: "error", - serializedName: "extraParametersBehavior", - type: { - name: "String", - }, - }, - responseFormat: { - serializedName: "responseFormat", - type: { - name: "Composite", - className: "ChatCompletionResponseFormat", - }, - }, - }, - }, -}; - -export const SearchIndexerKnowledgeStoreObjectProjectionSelector: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreObjectProjectionSelector", - modelProperties: { - ...SearchIndexerKnowledgeStoreBlobProjectionSelector.type - .modelProperties, - }, - }, - }; - -export const SearchIndexerKnowledgeStoreFileProjectionSelector: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreFileProjectionSelector", - modelProperties: { - ...SearchIndexerKnowledgeStoreBlobProjectionSelector.type - .modelProperties, - }, - }, - }; - -export let discriminators = { - KnowledgeBaseModel: KnowledgeBaseModel, - KnowledgeRetrievalReasoningEffort: KnowledgeRetrievalReasoningEffort, - SearchIndexerDataIdentity: SearchIndexerDataIdentity, - KnowledgeSource: KnowledgeSource, - DataChangeDetectionPolicy: DataChangeDetectionPolicy, - DataDeletionDetectionPolicy: DataDeletionDetectionPolicy, - SearchIndexerSkill: SearchIndexerSkill, - CognitiveServicesAccount: CognitiveServicesAccount, - ScoringFunction: ScoringFunction, - LexicalAnalyzer: LexicalAnalyzer, - LexicalTokenizer: LexicalTokenizer, - TokenFilter: TokenFilter, - CharFilter: CharFilter, - LexicalNormalizer: LexicalNormalizer, - Similarity: Similarity, - VectorSearchAlgorithmConfiguration: VectorSearchAlgorithmConfiguration, - VectorSearchVectorizer: VectorSearchVectorizer, - VectorSearchCompression: VectorSearchCompression, - KnowledgeSourceVectorizer: KnowledgeSourceVectorizer, - "KnowledgeBaseModel.azureOpenAI": KnowledgeBaseAzureOpenAIModel, - "KnowledgeRetrievalReasoningEffort.minimal": - KnowledgeRetrievalMinimalReasoningEffort, - "KnowledgeRetrievalReasoningEffort.low": KnowledgeRetrievalLowReasoningEffort, - "KnowledgeRetrievalReasoningEffort.medium": - KnowledgeRetrievalMediumReasoningEffort, - "SearchIndexerDataIdentity.#Microsoft.Azure.Search.DataNoneIdentity": - SearchIndexerDataNoneIdentity, - "SearchIndexerDataIdentity.#Microsoft.Azure.Search.DataUserAssignedIdentity": - SearchIndexerDataUserAssignedIdentity, - "KnowledgeSource.searchIndex": SearchIndexKnowledgeSource, - "KnowledgeSource.azureBlob": AzureBlobKnowledgeSource, - "KnowledgeSource.indexedSharePoint": IndexedSharePointKnowledgeSource, - "KnowledgeSource.indexedOneLake": IndexedOneLakeKnowledgeSource, - "KnowledgeSource.web": WebKnowledgeSource, - "KnowledgeSource.remoteSharePoint": RemoteSharePointKnowledgeSource, - "DataChangeDetectionPolicy.#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy": - HighWaterMarkChangeDetectionPolicy, - 
"DataChangeDetectionPolicy.#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy": - SqlIntegratedChangeTrackingPolicy, - "DataDeletionDetectionPolicy.#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy": - SoftDeleteColumnDeletionDetectionPolicy, - "DataDeletionDetectionPolicy.#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy": - NativeBlobSoftDeleteDeletionDetectionPolicy, - "SearchIndexerSkill.#Microsoft.Skills.Util.ConditionalSkill": - ConditionalSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.KeyPhraseExtractionSkill": - KeyPhraseExtractionSkill, - "SearchIndexerSkill.#Microsoft.Skills.Vision.OcrSkill": OcrSkill, - "SearchIndexerSkill.#Microsoft.Skills.Vision.ImageAnalysisSkill": - ImageAnalysisSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.LanguageDetectionSkill": - LanguageDetectionSkill, - "SearchIndexerSkill.#Microsoft.Skills.Util.ShaperSkill": ShaperSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.MergeSkill": MergeSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.EntityRecognitionSkill": - EntityRecognitionSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.SentimentSkill": SentimentSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.V3.SentimentSkill": - SentimentSkillV3, - "SearchIndexerSkill.#Microsoft.Skills.Text.V3.EntityLinkingSkill": - EntityLinkingSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.V3.EntityRecognitionSkill": - EntityRecognitionSkillV3, - "SearchIndexerSkill.#Microsoft.Skills.Text.PIIDetectionSkill": - PIIDetectionSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.SplitSkill": SplitSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.CustomEntityLookupSkill": - CustomEntityLookupSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.TranslationSkill": - TextTranslationSkill, - "SearchIndexerSkill.#Microsoft.Skills.Util.DocumentExtractionSkill": - DocumentExtractionSkill, - "SearchIndexerSkill.#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill": - DocumentIntelligenceLayoutSkill, - "SearchIndexerSkill.#Microsoft.Skills.Custom.WebApiSkill": WebApiSkill, - "SearchIndexerSkill.#Microsoft.Skills.Util.ContentUnderstandingSkill": - ContentUnderstandingSkill, - "SearchIndexerSkill.#Microsoft.Skills.Custom.AmlSkill": - AzureMachineLearningSkill, - "SearchIndexerSkill.#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill": - AzureOpenAIEmbeddingSkill, - "SearchIndexerSkill.#Microsoft.Skills.Vision.VectorizeSkill": - VisionVectorizeSkill, - "CognitiveServicesAccount.#Microsoft.Azure.Search.DefaultCognitiveServices": - DefaultCognitiveServicesAccount, - "CognitiveServicesAccount.#Microsoft.Azure.Search.CognitiveServicesByKey": - CognitiveServicesAccountKey, - "CognitiveServicesAccount.#Microsoft.Azure.Search.AIServicesByKey": - AIServicesAccountKey, - "CognitiveServicesAccount.#Microsoft.Azure.Search.AIServicesByIdentity": - AIServicesAccountIdentity, - "ScoringFunction.distance": DistanceScoringFunction, - "ScoringFunction.freshness": FreshnessScoringFunction, - "ScoringFunction.magnitude": MagnitudeScoringFunction, - "ScoringFunction.tag": TagScoringFunction, - "LexicalAnalyzer.#Microsoft.Azure.Search.CustomAnalyzer": CustomAnalyzer, - "LexicalAnalyzer.#Microsoft.Azure.Search.PatternAnalyzer": PatternAnalyzer, - "LexicalAnalyzer.#Microsoft.Azure.Search.StandardAnalyzer": - LuceneStandardAnalyzer, - "LexicalAnalyzer.#Microsoft.Azure.Search.StopAnalyzer": StopAnalyzer, - "LexicalTokenizer.#Microsoft.Azure.Search.ClassicTokenizer": ClassicTokenizer, - "LexicalTokenizer.#Microsoft.Azure.Search.EdgeNGramTokenizer": - 
EdgeNGramTokenizer, - "LexicalTokenizer.#Microsoft.Azure.Search.KeywordTokenizer": KeywordTokenizer, - "LexicalTokenizer.#Microsoft.Azure.Search.KeywordTokenizerV2": - KeywordTokenizerV2, - "LexicalTokenizer.#Microsoft.Azure.Search.MicrosoftLanguageTokenizer": - MicrosoftLanguageTokenizer, - "LexicalTokenizer.#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer": - MicrosoftLanguageStemmingTokenizer, - "LexicalTokenizer.#Microsoft.Azure.Search.NGramTokenizer": NGramTokenizer, - "LexicalTokenizer.#Microsoft.Azure.Search.PathHierarchyTokenizerV2": - PathHierarchyTokenizerV2, - "LexicalTokenizer.#Microsoft.Azure.Search.PatternTokenizer": PatternTokenizer, - "LexicalTokenizer.#Microsoft.Azure.Search.StandardTokenizer": - LuceneStandardTokenizer, - "LexicalTokenizer.#Microsoft.Azure.Search.StandardTokenizerV2": - LuceneStandardTokenizerV2, - "LexicalTokenizer.#Microsoft.Azure.Search.UaxUrlEmailTokenizer": - UaxUrlEmailTokenizer, - "TokenFilter.#Microsoft.Azure.Search.AsciiFoldingTokenFilter": - AsciiFoldingTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.CjkBigramTokenFilter": - CjkBigramTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.CommonGramTokenFilter": - CommonGramTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter": - DictionaryDecompounderTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.EdgeNGramTokenFilter": - EdgeNGramTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.EdgeNGramTokenFilterV2": - EdgeNGramTokenFilterV2, - "TokenFilter.#Microsoft.Azure.Search.ElisionTokenFilter": ElisionTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.KeepTokenFilter": KeepTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.KeywordMarkerTokenFilter": - KeywordMarkerTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.LengthTokenFilter": LengthTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.LimitTokenFilter": LimitTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.NGramTokenFilter": NGramTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.NGramTokenFilterV2": NGramTokenFilterV2, - "TokenFilter.#Microsoft.Azure.Search.PatternCaptureTokenFilter": - PatternCaptureTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.PatternReplaceTokenFilter": - PatternReplaceTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.PhoneticTokenFilter": - PhoneticTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.ShingleTokenFilter": ShingleTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.SnowballTokenFilter": - SnowballTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.StemmerTokenFilter": StemmerTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.StemmerOverrideTokenFilter": - StemmerOverrideTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.StopwordsTokenFilter": - StopwordsTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.SynonymTokenFilter": SynonymTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.TruncateTokenFilter": - TruncateTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.UniqueTokenFilter": UniqueTokenFilter, - "TokenFilter.#Microsoft.Azure.Search.WordDelimiterTokenFilter": - WordDelimiterTokenFilter, - "CharFilter.#Microsoft.Azure.Search.MappingCharFilter": MappingCharFilter, - "CharFilter.#Microsoft.Azure.Search.PatternReplaceCharFilter": - PatternReplaceCharFilter, - "LexicalNormalizer.#Microsoft.Azure.Search.CustomNormalizer": - CustomNormalizer, - "Similarity.#Microsoft.Azure.Search.ClassicSimilarity": ClassicSimilarity, - "Similarity.#Microsoft.Azure.Search.BM25Similarity": BM25Similarity, - "VectorSearchAlgorithmConfiguration.hnsw": 
HnswAlgorithmConfiguration, - "VectorSearchAlgorithmConfiguration.exhaustiveKnn": - ExhaustiveKnnAlgorithmConfiguration, - "VectorSearchVectorizer.azureOpenAI": AzureOpenAIVectorizer, - "VectorSearchVectorizer.customWebApi": WebApiVectorizer, - "VectorSearchVectorizer.aiServicesVision": AIServicesVisionVectorizer, - "VectorSearchVectorizer.aml": AMLVectorizer, - "VectorSearchCompression.scalarQuantization": ScalarQuantizationCompression, - "VectorSearchCompression.binaryQuantization": BinaryQuantizationCompression, - "KnowledgeSourceVectorizer.azureOpenAI": KnowledgeSourceAzureOpenAIVectorizer, - "WebApiSkill.#Microsoft.Skills.Custom.ChatCompletionSkill": - ChatCompletionSkill, -}; diff --git a/sdk/search/search-documents/src/generated/service/models/parameters.ts b/sdk/search/search-documents/src/generated/service/models/parameters.ts deleted file mode 100644 index 23d89b5f3c13..000000000000 --- a/sdk/search/search-documents/src/generated/service/models/parameters.ts +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import { - OperationParameter, - OperationURLParameter, - OperationQueryParameter, -} from "@azure/core-client"; -import { - KnowledgeBase as KnowledgeBaseMapper, - KnowledgeSource as KnowledgeSourceMapper, - SearchIndexerDataSource as SearchIndexerDataSourceMapper, - DocumentKeysOrIds as DocumentKeysOrIdsMapper, - IndexerResyncBody as IndexerResyncBodyMapper, - SearchIndexer as SearchIndexerMapper, - SearchIndexerSkillset as SearchIndexerSkillsetMapper, - SkillNames as SkillNamesMapper, - SynonymMap as SynonymMapMapper, - SearchIndex as SearchIndexMapper, - AnalyzeRequest as AnalyzeRequestMapper, - SearchAlias as SearchAliasMapper, -} from "../models/mappers.js"; - -export const contentType: OperationParameter = { - parameterPath: ["options", "contentType"], - mapper: { - defaultValue: "application/json", - isConstant: true, - serializedName: "Content-Type", - type: { - name: "String", - }, - }, -}; - -export const knowledgeBase: OperationParameter = { - parameterPath: "knowledgeBase", - mapper: KnowledgeBaseMapper, -}; - -export const accept: OperationParameter = { - parameterPath: "accept", - mapper: { - defaultValue: "application/json", - isConstant: true, - serializedName: "Accept", - type: { - name: "String", - }, - }, -}; - -export const endpoint: OperationURLParameter = { - parameterPath: "endpoint", - mapper: { - serializedName: "endpoint", - required: true, - type: { - name: "String", - }, - }, - skipEncoding: true, -}; - -export const knowledgeBaseName: OperationURLParameter = { - parameterPath: "knowledgeBaseName", - mapper: { - serializedName: "knowledgeBaseName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const ifMatch: OperationParameter = { - parameterPath: ["options", "ifMatch"], - mapper: { - serializedName: "If-Match", - type: { - name: "String", - }, - }, -}; - -export const ifNoneMatch: OperationParameter = { - parameterPath: ["options", "ifNoneMatch"], - mapper: { - serializedName: "If-None-Match", - type: { - name: "String", - }, - }, -}; - -export const prefer: OperationParameter = { - parameterPath: "prefer", - mapper: { - defaultValue: "return=representation", - isConstant: true, - serializedName: "Prefer", - type: { - name: "String", - }, - }, -}; - -export const apiVersion: OperationQueryParameter = { - 
parameterPath: "apiVersion", - mapper: { - serializedName: "api-version", - required: true, - type: { - name: "String", - }, - }, -}; - -export const knowledgeSource: OperationParameter = { - parameterPath: "knowledgeSource", - mapper: KnowledgeSourceMapper, -}; - -export const sourceName: OperationURLParameter = { - parameterPath: "sourceName", - mapper: { - serializedName: "sourceName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const dataSource: OperationParameter = { - parameterPath: "dataSource", - mapper: SearchIndexerDataSourceMapper, -}; - -export const dataSourceName: OperationURLParameter = { - parameterPath: "dataSourceName", - mapper: { - serializedName: "dataSourceName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const skipIndexerResetRequirementForCache: OperationQueryParameter = { - parameterPath: ["options", "skipIndexerResetRequirementForCache"], - mapper: { - serializedName: "ignoreResetRequirements", - type: { - name: "Boolean", - }, - }, -}; - -export const select: OperationQueryParameter = { - parameterPath: ["options", "select"], - mapper: { - serializedName: "$select", - type: { - name: "String", - }, - }, -}; - -export const indexerName: OperationURLParameter = { - parameterPath: "indexerName", - mapper: { - serializedName: "indexerName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const keysOrIds: OperationParameter = { - parameterPath: ["options", "keysOrIds"], - mapper: DocumentKeysOrIdsMapper, -}; - -export const overwrite: OperationQueryParameter = { - parameterPath: ["options", "overwrite"], - mapper: { - defaultValue: false, - serializedName: "overwrite", - type: { - name: "Boolean", - }, - }, -}; - -export const indexerResync: OperationParameter = { - parameterPath: "indexerResync", - mapper: IndexerResyncBodyMapper, -}; - -export const indexer: OperationParameter = { - parameterPath: "indexer", - mapper: SearchIndexerMapper, -}; - -export const disableCacheReprocessingChangeDetection: OperationQueryParameter = - { - parameterPath: ["options", "disableCacheReprocessingChangeDetection"], - mapper: { - serializedName: "disableCacheReprocessingChangeDetection", - type: { - name: "Boolean", - }, - }, - }; - -export const skillset: OperationParameter = { - parameterPath: "skillset", - mapper: SearchIndexerSkillsetMapper, -}; - -export const skillsetName: OperationURLParameter = { - parameterPath: "skillsetName", - mapper: { - serializedName: "skillsetName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const skillNames: OperationParameter = { - parameterPath: "skillNames", - mapper: SkillNamesMapper, -}; - -export const synonymMap: OperationParameter = { - parameterPath: "synonymMap", - mapper: SynonymMapMapper, -}; - -export const synonymMapName: OperationURLParameter = { - parameterPath: "synonymMapName", - mapper: { - serializedName: "synonymMapName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const index: OperationParameter = { - parameterPath: "index", - mapper: SearchIndexMapper, -}; - -export const indexName: OperationURLParameter = { - parameterPath: "indexName", - mapper: { - serializedName: "indexName", - required: true, - type: { - name: "String", - }, - }, -}; - -export const allowIndexDowntime: OperationQueryParameter = { - parameterPath: ["options", "allowIndexDowntime"], - mapper: { - serializedName: "allowIndexDowntime", - type: { - name: "Boolean", - }, - }, -}; - -export const request: OperationParameter = { - 
parameterPath: "request", - mapper: AnalyzeRequestMapper, -}; - -export const alias: OperationParameter = { - parameterPath: "alias", - mapper: SearchAliasMapper, -}; - -export const aliasName: OperationURLParameter = { - parameterPath: "aliasName", - mapper: { - serializedName: "aliasName", - required: true, - type: { - name: "String", - }, - }, -}; diff --git a/sdk/search/search-documents/src/generated/service/operations/aliases.ts b/sdk/search/search-documents/src/generated/service/operations/aliases.ts deleted file mode 100644 index 32ca20eb76a4..000000000000 --- a/sdk/search/search-documents/src/generated/service/operations/aliases.ts +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import { Aliases } from "../operationsInterfaces/index.js"; -import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; -import { - SearchAlias, - AliasesCreateOptionalParams, - AliasesCreateResponse, - AliasesListOptionalParams, - AliasesListResponse, - AliasesCreateOrUpdateOptionalParams, - AliasesCreateOrUpdateResponse, - AliasesDeleteOptionalParams, - AliasesGetOptionalParams, - AliasesGetResponse, -} from "../models/index.js"; - -/** Class containing Aliases operations. */ -export class AliasesImpl implements Aliases { - private readonly client: SearchServiceClient; - - /** - * Initialize a new instance of the class Aliases class. - * @param client Reference to the service client - */ - constructor(client: SearchServiceClient) { - this.client = client; - } - - /** - * Creates a new search alias. - * @param alias The definition of the alias to create. - * @param options The options parameters. - */ - create( - alias: SearchAlias, - options?: AliasesCreateOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { alias, options }, - createOperationSpec, - ); - } - - /** - * Lists all aliases available for a search service. - * @param options The options parameters. - */ - list(options?: AliasesListOptionalParams): Promise { - return this.client.sendOperationRequest({ options }, listOperationSpec); - } - - /** - * Creates a new search alias or updates an alias if it already exists. - * @param aliasName The definition of the alias to create or update. - * @param alias The definition of the alias to create or update. - * @param options The options parameters. - */ - createOrUpdate( - aliasName: string, - alias: SearchAlias, - options?: AliasesCreateOrUpdateOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { aliasName, alias, options }, - createOrUpdateOperationSpec, - ); - } - - /** - * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no - * recovery option. The mapped index is untouched by this operation. - * @param aliasName The name of the alias to delete. - * @param options The options parameters. - */ - delete( - aliasName: string, - options?: AliasesDeleteOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { aliasName, options }, - deleteOperationSpec, - ); - } - - /** - * Retrieves an alias definition. - * @param aliasName The name of the alias to retrieve. - * @param options The options parameters. 
- */ - get( - aliasName: string, - options?: AliasesGetOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { aliasName, options }, - getOperationSpec, - ); - } -} -// Operation Specifications -const serializer = coreClient.createSerializer(Mappers, /* isXml */ false); - -const createOperationSpec: coreClient.OperationSpec = { - path: "/aliases", - httpMethod: "POST", - responses: { - 201: { - bodyMapper: Mappers.SearchAlias, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.alias, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint], - headerParameters: [Parameters.contentType, Parameters.accept], - mediaType: "json", - serializer, -}; -const listOperationSpec: coreClient.OperationSpec = { - path: "/aliases", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: Mappers.ListAliasesResult, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint], - headerParameters: [Parameters.accept], - serializer, -}; -const createOrUpdateOperationSpec: coreClient.OperationSpec = { - path: "/aliases('{aliasName}')", - httpMethod: "PUT", - responses: { - 200: { - bodyMapper: Mappers.SearchAlias, - }, - 201: { - bodyMapper: Mappers.SearchAlias, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.alias, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.aliasName], - headerParameters: [ - Parameters.contentType, - Parameters.accept, - Parameters.ifMatch, - Parameters.ifNoneMatch, - Parameters.prefer, - ], - mediaType: "json", - serializer, -}; -const deleteOperationSpec: coreClient.OperationSpec = { - path: "/aliases('{aliasName}')", - httpMethod: "DELETE", - responses: { - 204: {}, - 404: {}, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.aliasName], - headerParameters: [ - Parameters.accept, - Parameters.ifMatch, - Parameters.ifNoneMatch, - ], - serializer, -}; -const getOperationSpec: coreClient.OperationSpec = { - path: "/aliases('{aliasName}')", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: Mappers.SearchAlias, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.aliasName], - headerParameters: [Parameters.accept], - serializer, -}; diff --git a/sdk/search/search-documents/src/generated/service/operations/dataSources.ts b/sdk/search/search-documents/src/generated/service/operations/dataSources.ts deleted file mode 100644 index 5bf610119b24..000000000000 --- a/sdk/search/search-documents/src/generated/service/operations/dataSources.ts +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- */ - -import { DataSources } from "../operationsInterfaces/index.js"; -import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; -import { - SearchIndexerDataSource, - DataSourcesCreateOrUpdateOptionalParams, - DataSourcesCreateOrUpdateResponse, - DataSourcesDeleteOptionalParams, - DataSourcesGetOptionalParams, - DataSourcesGetResponse, - DataSourcesListOptionalParams, - DataSourcesListResponse, - DataSourcesCreateOptionalParams, - DataSourcesCreateResponse, -} from "../models/index.js"; - -/** Class containing DataSources operations. */ -export class DataSourcesImpl implements DataSources { - private readonly client: SearchServiceClient; - - /** - * Initialize a new instance of the class DataSources class. - * @param client Reference to the service client - */ - constructor(client: SearchServiceClient) { - this.client = client; - } - - /** - * Creates a new datasource or updates a datasource if it already exists. - * @param dataSourceName The name of the datasource to create or update. - * @param dataSource The definition of the datasource to create or update. - * @param options The options parameters. - */ - createOrUpdate( - dataSourceName: string, - dataSource: SearchIndexerDataSource, - options?: DataSourcesCreateOrUpdateOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { dataSourceName, dataSource, options }, - createOrUpdateOperationSpec, - ); - } - - /** - * Deletes a datasource. - * @param dataSourceName The name of the datasource to delete. - * @param options The options parameters. - */ - delete( - dataSourceName: string, - options?: DataSourcesDeleteOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { dataSourceName, options }, - deleteOperationSpec, - ); - } - - /** - * Retrieves a datasource definition. - * @param dataSourceName The name of the datasource to retrieve. - * @param options The options parameters. - */ - get( - dataSourceName: string, - options?: DataSourcesGetOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { dataSourceName, options }, - getOperationSpec, - ); - } - - /** - * Lists all datasources available for a search service. - * @param options The options parameters. - */ - list( - options?: DataSourcesListOptionalParams, - ): Promise { - return this.client.sendOperationRequest({ options }, listOperationSpec); - } - - /** - * Creates a new datasource. - * @param dataSource The definition of the datasource to create. - * @param options The options parameters. 
- */ - create( - dataSource: SearchIndexerDataSource, - options?: DataSourcesCreateOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { dataSource, options }, - createOperationSpec, - ); - } -} -// Operation Specifications -const serializer = coreClient.createSerializer(Mappers, /* isXml */ false); - -const createOrUpdateOperationSpec: coreClient.OperationSpec = { - path: "/datasources('{dataSourceName}')", - httpMethod: "PUT", - responses: { - 200: { - bodyMapper: Mappers.SearchIndexerDataSource, - }, - 201: { - bodyMapper: Mappers.SearchIndexerDataSource, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.dataSource, - queryParameters: [ - Parameters.apiVersion, - Parameters.skipIndexerResetRequirementForCache, - ], - urlParameters: [Parameters.endpoint, Parameters.dataSourceName], - headerParameters: [ - Parameters.contentType, - Parameters.accept, - Parameters.ifMatch, - Parameters.ifNoneMatch, - Parameters.prefer, - ], - mediaType: "json", - serializer, -}; -const deleteOperationSpec: coreClient.OperationSpec = { - path: "/datasources('{dataSourceName}')", - httpMethod: "DELETE", - responses: { - 204: {}, - 404: {}, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.dataSourceName], - headerParameters: [ - Parameters.accept, - Parameters.ifMatch, - Parameters.ifNoneMatch, - ], - serializer, -}; -const getOperationSpec: coreClient.OperationSpec = { - path: "/datasources('{dataSourceName}')", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: Mappers.SearchIndexerDataSource, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.dataSourceName], - headerParameters: [Parameters.accept], - serializer, -}; -const listOperationSpec: coreClient.OperationSpec = { - path: "/datasources", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: Mappers.ListDataSourcesResult, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion, Parameters.select], - urlParameters: [Parameters.endpoint], - headerParameters: [Parameters.accept], - serializer, -}; -const createOperationSpec: coreClient.OperationSpec = { - path: "/datasources", - httpMethod: "POST", - responses: { - 201: { - bodyMapper: Mappers.SearchIndexerDataSource, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.dataSource, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint], - headerParameters: [Parameters.contentType, Parameters.accept], - mediaType: "json", - serializer, -}; diff --git a/sdk/search/search-documents/src/generated/service/operations/index.ts b/sdk/search/search-documents/src/generated/service/operations/index.ts deleted file mode 100644 index 7b86b827f244..000000000000 --- a/sdk/search/search-documents/src/generated/service/operations/index.ts +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- */ - -export * from "./knowledgeBases.js"; -export * from "./knowledgeSources.js"; -export * from "./dataSources.js"; -export * from "./indexers.js"; -export * from "./skillsets.js"; -export * from "./synonymMaps.js"; -export * from "./indexes.js"; -export * from "./aliases.js"; diff --git a/sdk/search/search-documents/src/generated/service/operations/indexers.ts b/sdk/search/search-documents/src/generated/service/operations/indexers.ts deleted file mode 100644 index 460a3c798361..000000000000 --- a/sdk/search/search-documents/src/generated/service/operations/indexers.ts +++ /dev/null @@ -1,368 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import { Indexers } from "../operationsInterfaces/index.js"; -import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; -import { - IndexersResetOptionalParams, - IndexersResetDocsOptionalParams, - IndexerResyncBody, - IndexersResyncOptionalParams, - IndexersRunOptionalParams, - SearchIndexer, - IndexersCreateOrUpdateOptionalParams, - IndexersCreateOrUpdateResponse, - IndexersDeleteOptionalParams, - IndexersGetOptionalParams, - IndexersGetResponse, - IndexersListOptionalParams, - IndexersListResponse, - IndexersCreateOptionalParams, - IndexersCreateResponse, - IndexersGetStatusOptionalParams, - IndexersGetStatusResponse, -} from "../models/index.js"; - -/** Class containing Indexers operations. */ -export class IndexersImpl implements Indexers { - private readonly client: SearchServiceClient; - - /** - * Initialize a new instance of the class Indexers class. - * @param client Reference to the service client - */ - constructor(client: SearchServiceClient) { - this.client = client; - } - - /** - * Resets the change tracking state associated with an indexer. - * @param indexerName The name of the indexer to reset. - * @param options The options parameters. - */ - reset( - indexerName: string, - options?: IndexersResetOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { indexerName, options }, - resetOperationSpec, - ); - } - - /** - * Resets specific documents in the datasource to be selectively re-ingested by the indexer. - * @param indexerName The name of the indexer to reset documents for. - * @param options The options parameters. - */ - resetDocs( - indexerName: string, - options?: IndexersResetDocsOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { indexerName, options }, - resetDocsOperationSpec, - ); - } - - /** - * Resync selective options from the datasource to be re-ingested by the indexer. - * @param indexerName The name of the indexer to resync for. - * @param indexerResync - * @param options The options parameters. - */ - resync( - indexerName: string, - indexerResync: IndexerResyncBody, - options?: IndexersResyncOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { indexerName, indexerResync, options }, - resyncOperationSpec, - ); - } - - /** - * Runs an indexer on-demand. - * @param indexerName The name of the indexer to run. - * @param options The options parameters. 
- */ - run(indexerName: string, options?: IndexersRunOptionalParams): Promise { - return this.client.sendOperationRequest( - { indexerName, options }, - runOperationSpec, - ); - } - - /** - * Creates a new indexer or updates an indexer if it already exists. - * @param indexerName The name of the indexer to create or update. - * @param indexer The definition of the indexer to create or update. - * @param options The options parameters. - */ - createOrUpdate( - indexerName: string, - indexer: SearchIndexer, - options?: IndexersCreateOrUpdateOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { indexerName, indexer, options }, - createOrUpdateOperationSpec, - ); - } - - /** - * Deletes an indexer. - * @param indexerName The name of the indexer to delete. - * @param options The options parameters. - */ - delete( - indexerName: string, - options?: IndexersDeleteOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { indexerName, options }, - deleteOperationSpec, - ); - } - - /** - * Retrieves an indexer definition. - * @param indexerName The name of the indexer to retrieve. - * @param options The options parameters. - */ - get( - indexerName: string, - options?: IndexersGetOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { indexerName, options }, - getOperationSpec, - ); - } - - /** - * Lists all indexers available for a search service. - * @param options The options parameters. - */ - list(options?: IndexersListOptionalParams): Promise { - return this.client.sendOperationRequest({ options }, listOperationSpec); - } - - /** - * Creates a new indexer. - * @param indexer The definition of the indexer to create. - * @param options The options parameters. - */ - create( - indexer: SearchIndexer, - options?: IndexersCreateOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { indexer, options }, - createOperationSpec, - ); - } - - /** - * Returns the current status and execution history of an indexer. - * @param indexerName The name of the indexer for which to retrieve status. - * @param options The options parameters. 
-   */
-  getStatus(
-    indexerName: string,
-    options?: IndexersGetStatusOptionalParams,
-  ): Promise<IndexersGetStatusResponse> {
-    return this.client.sendOperationRequest(
-      { indexerName, options },
-      getStatusOperationSpec,
-    );
-  }
-}
-// Operation Specifications
-const serializer = coreClient.createSerializer(Mappers, /* isXml */ false);
-
-const resetOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers('{indexerName}')/search.reset",
-  httpMethod: "POST",
-  responses: {
-    204: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexerName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const resetDocsOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers('{indexerName}')/search.resetdocs",
-  httpMethod: "POST",
-  responses: {
-    204: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.keysOrIds,
-  queryParameters: [Parameters.apiVersion, Parameters.overwrite],
-  urlParameters: [Parameters.endpoint, Parameters.indexerName],
-  headerParameters: [Parameters.contentType, Parameters.accept],
-  mediaType: "json",
-  serializer,
-};
-const resyncOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers('{indexerName}')/search.resync",
-  httpMethod: "POST",
-  responses: {
-    204: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.indexerResync,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexerName],
-  headerParameters: [Parameters.contentType, Parameters.accept],
-  mediaType: "json",
-  serializer,
-};
-const runOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers('{indexerName}')/search.run",
-  httpMethod: "POST",
-  responses: {
-    202: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexerName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const createOrUpdateOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers('{indexerName}')",
-  httpMethod: "PUT",
-  responses: {
-    200: {
-      bodyMapper: Mappers.SearchIndexer,
-    },
-    201: {
-      bodyMapper: Mappers.SearchIndexer,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.indexer,
-  queryParameters: [
-    Parameters.apiVersion,
-    Parameters.skipIndexerResetRequirementForCache,
-    Parameters.disableCacheReprocessingChangeDetection,
-  ],
-  urlParameters: [Parameters.endpoint, Parameters.indexerName],
-  headerParameters: [
-    Parameters.contentType,
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-    Parameters.prefer,
-  ],
-  mediaType: "json",
-  serializer,
-};
-const deleteOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers('{indexerName}')",
-  httpMethod: "DELETE",
-  responses: {
-    204: {},
-    404: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexerName],
-  headerParameters: [
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-  ],
-  serializer,
-};
-const getOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers('{indexerName}')",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.SearchIndexer,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexerName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const listOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.ListIndexersResult,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion, Parameters.select],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const createOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers",
-  httpMethod: "POST",
-  responses: {
-    201: {
-      bodyMapper: Mappers.SearchIndexer,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.indexer,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.contentType, Parameters.accept],
-  mediaType: "json",
-  serializer,
-};
-const getStatusOperationSpec: coreClient.OperationSpec = {
-  path: "/indexers('{indexerName}')/search.status",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.SearchIndexerStatus,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexerName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
diff --git a/sdk/search/search-documents/src/generated/service/operations/indexes.ts b/sdk/search/search-documents/src/generated/service/operations/indexes.ts
deleted file mode 100644
index 7487b922cf78..000000000000
--- a/sdk/search/search-documents/src/generated/service/operations/indexes.ts
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import { Indexes } from "../operationsInterfaces/index.js";
-import * as coreClient from "@azure/core-client";
-import * as Mappers from "../models/mappers.js";
-import * as Parameters from "../models/parameters.js";
-import { SearchServiceClient } from "../searchServiceClient.js";
-import {
-  SearchIndex,
-  IndexesCreateOptionalParams,
-  IndexesCreateResponse,
-  IndexesListOptionalParams,
-  IndexesListResponse,
-  IndexesCreateOrUpdateOptionalParams,
-  IndexesCreateOrUpdateResponse,
-  IndexesDeleteOptionalParams,
-  IndexesGetOptionalParams,
-  IndexesGetResponse,
-  IndexesGetStatisticsOptionalParams,
-  IndexesGetStatisticsResponse,
-  AnalyzeRequest,
-  IndexesAnalyzeOptionalParams,
-  IndexesAnalyzeResponse,
-} from "../models/index.js";
-
-/** Class containing Indexes operations. */
-export class IndexesImpl implements Indexes {
-  private readonly client: SearchServiceClient;
-
-  /**
-   * Initializes a new instance of the Indexes class.
-   * @param client Reference to the service client
-   */
-  constructor(client: SearchServiceClient) {
-    this.client = client;
-  }
-
-  /**
-   * Creates a new search index.
-   * @param index The definition of the index to create.
-   * @param options The options parameters.
-   */
-  create(
-    index: SearchIndex,
-    options?: IndexesCreateOptionalParams,
-  ): Promise<IndexesCreateResponse> {
-    return this.client.sendOperationRequest(
-      { index, options },
-      createOperationSpec,
-    );
-  }
-
-  /**
-   * Lists all indexes available for a search service.
-   * @param options The options parameters.
-   */
-  list(options?: IndexesListOptionalParams): Promise<IndexesListResponse> {
-    return this.client.sendOperationRequest({ options }, listOperationSpec);
-  }
-
-  /**
-   * Creates a new search index or updates an index if it already exists.
-   * @param indexName The name of the index to create or update.
-   * @param index The definition of the index to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    indexName: string,
-    index: SearchIndex,
-    options?: IndexesCreateOrUpdateOptionalParams,
-  ): Promise<IndexesCreateOrUpdateResponse> {
-    return this.client.sendOperationRequest(
-      { indexName, index, options },
-      createOrUpdateOperationSpec,
-    );
-  }
-
-  /**
-   * Deletes a search index and all the documents it contains. This operation is permanent, with no
-   * recovery option. Make sure you have a master copy of your index definition, data ingestion code, and
-   * a backup of the primary data source in case you need to re-build the index.
-   * @param indexName The name of the index to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    indexName: string,
-    options?: IndexesDeleteOptionalParams,
-  ): Promise<void> {
-    return this.client.sendOperationRequest(
-      { indexName, options },
-      deleteOperationSpec,
-    );
-  }
-
-  /**
-   * Retrieves an index definition.
-   * @param indexName The name of the index to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    indexName: string,
-    options?: IndexesGetOptionalParams,
-  ): Promise<IndexesGetResponse> {
-    return this.client.sendOperationRequest(
-      { indexName, options },
-      getOperationSpec,
-    );
-  }
-
-  /**
-   * Returns statistics for the given index, including a document count and storage usage.
-   * @param indexName The name of the index for which to retrieve statistics.
-   * @param options The options parameters.
-   */
-  getStatistics(
-    indexName: string,
-    options?: IndexesGetStatisticsOptionalParams,
-  ): Promise<IndexesGetStatisticsResponse> {
-    return this.client.sendOperationRequest(
-      { indexName, options },
-      getStatisticsOperationSpec,
-    );
-  }
-
-  /**
-   * Shows how an analyzer breaks text into tokens.
-   * @param indexName The name of the index for which to test an analyzer.
-   * @param request The text and analyzer or analysis components to test.
-   * @param options The options parameters.
-   */
-  analyze(
-    indexName: string,
-    request: AnalyzeRequest,
-    options?: IndexesAnalyzeOptionalParams,
-  ): Promise<IndexesAnalyzeResponse> {
-    return this.client.sendOperationRequest(
-      { indexName, request, options },
-      analyzeOperationSpec,
-    );
-  }
-}
-// Operation Specifications
-const serializer = coreClient.createSerializer(Mappers, /* isXml */ false);
-
-const createOperationSpec: coreClient.OperationSpec = {
-  path: "/indexes",
-  httpMethod: "POST",
-  responses: {
-    201: {
-      bodyMapper: Mappers.SearchIndex,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.index,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.contentType, Parameters.accept],
-  mediaType: "json",
-  serializer,
-};
-const listOperationSpec: coreClient.OperationSpec = {
-  path: "/indexes",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.ListIndexesResult,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion, Parameters.select],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const createOrUpdateOperationSpec: coreClient.OperationSpec = {
-  path: "/indexes('{indexName}')",
-  httpMethod: "PUT",
-  responses: {
-    200: {
-      bodyMapper: Mappers.SearchIndex,
-    },
-    201: {
-      bodyMapper: Mappers.SearchIndex,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.index,
-  queryParameters: [Parameters.apiVersion, Parameters.allowIndexDowntime],
-  urlParameters: [Parameters.endpoint, Parameters.indexName],
-  headerParameters: [
-    Parameters.contentType,
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-    Parameters.prefer,
-  ],
-  mediaType: "json",
-  serializer,
-};
-const deleteOperationSpec: coreClient.OperationSpec = {
-  path: "/indexes('{indexName}')",
-  httpMethod: "DELETE",
-  responses: {
-    204: {},
-    404: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexName],
-  headerParameters: [
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-  ],
-  serializer,
-};
-const getOperationSpec: coreClient.OperationSpec = {
-  path: "/indexes('{indexName}')",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.SearchIndex,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const getStatisticsOperationSpec: coreClient.OperationSpec = {
-  path: "/indexes('{indexName}')/search.stats",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.GetIndexStatisticsResult,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const analyzeOperationSpec: coreClient.OperationSpec = {
-  path: "/indexes('{indexName}')/search.analyze",
-  httpMethod: "POST",
-  responses: {
-    200: {
-      bodyMapper: Mappers.AnalyzeResult,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.request,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.indexName],
-  headerParameters: [Parameters.contentType, Parameters.accept],
mediaType: "json", - serializer, -}; diff --git a/sdk/search/search-documents/src/generated/service/operations/knowledgeBases.ts b/sdk/search/search-documents/src/generated/service/operations/knowledgeBases.ts deleted file mode 100644 index c30c180c4976..000000000000 --- a/sdk/search/search-documents/src/generated/service/operations/knowledgeBases.ts +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import { KnowledgeBases } from "../operationsInterfaces/index.js"; -import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; -import { - KnowledgeBase, - KnowledgeBasesCreateOrUpdateOptionalParams, - KnowledgeBasesCreateOrUpdateResponse, - KnowledgeBasesDeleteOptionalParams, - KnowledgeBasesGetOptionalParams, - KnowledgeBasesGetResponse, - KnowledgeBasesListOptionalParams, - KnowledgeBasesListResponse, - KnowledgeBasesCreateOptionalParams, - KnowledgeBasesCreateResponse, -} from "../models/index.js"; - -/** Class containing KnowledgeBases operations. */ -export class KnowledgeBasesImpl implements KnowledgeBases { - private readonly client: SearchServiceClient; - - /** - * Initialize a new instance of the class KnowledgeBases class. - * @param client Reference to the service client - */ - constructor(client: SearchServiceClient) { - this.client = client; - } - - /** - * Creates a new knowledge base or updates an knowledge base if it already exists. - * @param knowledgeBaseName The name of the knowledge base to create or update. - * @param knowledgeBase The definition of the knowledge base to create or update. - * @param options The options parameters. - */ - createOrUpdate( - knowledgeBaseName: string, - knowledgeBase: KnowledgeBase, - options?: KnowledgeBasesCreateOrUpdateOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { knowledgeBaseName, knowledgeBase, options }, - createOrUpdateOperationSpec, - ); - } - - /** - * Deletes an existing knowledge base. - * @param knowledgeBaseName The name of the knowledge base to delete. - * @param options The options parameters. - */ - delete( - knowledgeBaseName: string, - options?: KnowledgeBasesDeleteOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { knowledgeBaseName, options }, - deleteOperationSpec, - ); - } - - /** - * Retrieves an knowledge base definition. - * @param knowledgeBaseName The name of the knowledge base to retrieve. - * @param options The options parameters. - */ - get( - knowledgeBaseName: string, - options?: KnowledgeBasesGetOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { knowledgeBaseName, options }, - getOperationSpec, - ); - } - - /** - * Lists all knowledge bases available for a search service. - * @param options The options parameters. - */ - list( - options?: KnowledgeBasesListOptionalParams, - ): Promise { - return this.client.sendOperationRequest({ options }, listOperationSpec); - } - - /** - * Creates a new knowledge base. - * @param knowledgeBase The definition of the knowledge base to create. - * @param options The options parameters. 
-   */
-  create(
-    knowledgeBase: KnowledgeBase,
-    options?: KnowledgeBasesCreateOptionalParams,
-  ): Promise<KnowledgeBasesCreateResponse> {
-    return this.client.sendOperationRequest(
-      { knowledgeBase, options },
-      createOperationSpec,
-    );
-  }
-}
-// Operation Specifications
-const serializer = coreClient.createSerializer(Mappers, /* isXml */ false);
-
-const createOrUpdateOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgebases('{knowledgeBaseName}')",
-  httpMethod: "PUT",
-  responses: {
-    200: {
-      bodyMapper: Mappers.KnowledgeBase,
-    },
-    201: {
-      bodyMapper: Mappers.KnowledgeBase,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.knowledgeBase,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.knowledgeBaseName],
-  headerParameters: [
-    Parameters.contentType,
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-    Parameters.prefer,
-  ],
-  mediaType: "json",
-  serializer,
-};
-const deleteOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgebases('{knowledgeBaseName}')",
-  httpMethod: "DELETE",
-  responses: {
-    204: {},
-    404: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.knowledgeBaseName],
-  headerParameters: [
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-  ],
-  serializer,
-};
-const getOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgebases('{knowledgeBaseName}')",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.KnowledgeBase,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.knowledgeBaseName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const listOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgebases",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.ListKnowledgeBasesResult,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const createOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgebases",
-  httpMethod: "POST",
-  responses: {
-    201: {
-      bodyMapper: Mappers.KnowledgeBase,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.knowledgeBase,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.contentType, Parameters.accept],
-  mediaType: "json",
-  serializer,
-};
diff --git a/sdk/search/search-documents/src/generated/service/operations/knowledgeSources.ts b/sdk/search/search-documents/src/generated/service/operations/knowledgeSources.ts
deleted file mode 100644
index 2a2930c6f1ec..000000000000
--- a/sdk/search/search-documents/src/generated/service/operations/knowledgeSources.ts
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import { KnowledgeSources } from "../operationsInterfaces/index.js";
-import * as coreClient from "@azure/core-client";
-import * as Mappers from "../models/mappers.js";
-import * as Parameters from "../models/parameters.js";
-import { SearchServiceClient } from "../searchServiceClient.js";
-import {
-  KnowledgeSourceUnion,
-  KnowledgeSourcesCreateOrUpdateOptionalParams,
-  KnowledgeSourcesCreateOrUpdateResponse,
-  KnowledgeSourcesDeleteOptionalParams,
-  KnowledgeSourcesGetOptionalParams,
-  KnowledgeSourcesGetResponse,
-  KnowledgeSourcesListOptionalParams,
-  KnowledgeSourcesListResponse,
-  KnowledgeSourcesCreateOptionalParams,
-  KnowledgeSourcesCreateResponse,
-  KnowledgeSourcesGetStatusOptionalParams,
-  KnowledgeSourcesGetStatusResponse,
-} from "../models/index.js";
-
-/** Class containing KnowledgeSources operations. */
-export class KnowledgeSourcesImpl implements KnowledgeSources {
-  private readonly client: SearchServiceClient;
-
-  /**
-   * Initializes a new instance of the KnowledgeSources class.
-   * @param client Reference to the service client
-   */
-  constructor(client: SearchServiceClient) {
-    this.client = client;
-  }
-
-  /**
-   * Creates a new knowledge source or updates a knowledge source if it already exists.
-   * @param sourceName The name of the knowledge source to create or update.
-   * @param knowledgeSource The definition of the knowledge source to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    sourceName: string,
-    knowledgeSource: KnowledgeSourceUnion,
-    options?: KnowledgeSourcesCreateOrUpdateOptionalParams,
-  ): Promise<KnowledgeSourcesCreateOrUpdateResponse> {
-    return this.client.sendOperationRequest(
-      { sourceName, knowledgeSource, options },
-      createOrUpdateOperationSpec,
-    );
-  }
-
-  /**
-   * Deletes an existing knowledge source.
-   * @param sourceName The name of the knowledge source to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    sourceName: string,
-    options?: KnowledgeSourcesDeleteOptionalParams,
-  ): Promise<void> {
-    return this.client.sendOperationRequest(
-      { sourceName, options },
-      deleteOperationSpec,
-    );
-  }
-
-  /**
-   * Retrieves a knowledge source definition.
-   * @param sourceName The name of the knowledge source to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    sourceName: string,
-    options?: KnowledgeSourcesGetOptionalParams,
-  ): Promise<KnowledgeSourcesGetResponse> {
-    return this.client.sendOperationRequest(
-      { sourceName, options },
-      getOperationSpec,
-    );
-  }
-
-  /**
-   * Lists all knowledge sources available for a search service.
-   * @param options The options parameters.
-   */
-  list(
-    options?: KnowledgeSourcesListOptionalParams,
-  ): Promise<KnowledgeSourcesListResponse> {
-    return this.client.sendOperationRequest({ options }, listOperationSpec);
-  }
-
-  /**
-   * Creates a new knowledge source.
-   * @param knowledgeSource The definition of the knowledge source to create.
-   * @param options The options parameters.
-   */
-  create(
-    knowledgeSource: KnowledgeSourceUnion,
-    options?: KnowledgeSourcesCreateOptionalParams,
-  ): Promise<KnowledgeSourcesCreateResponse> {
-    return this.client.sendOperationRequest(
-      { knowledgeSource, options },
-      createOperationSpec,
-    );
-  }
-
-  /**
-   * Returns the current status and synchronization history of a knowledge source.
-   * @param sourceName The name of the knowledge source for which to retrieve status.
-   * @param options The options parameters.
-   */
-  getStatus(
-    sourceName: string,
-    options?: KnowledgeSourcesGetStatusOptionalParams,
-  ): Promise<KnowledgeSourcesGetStatusResponse> {
-    return this.client.sendOperationRequest(
-      { sourceName, options },
-      getStatusOperationSpec,
-    );
-  }
-}
-// Operation Specifications
-const serializer = coreClient.createSerializer(Mappers, /* isXml */ false);
-
-const createOrUpdateOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgesources('{sourceName}')",
-  httpMethod: "PUT",
-  responses: {
-    200: {
-      bodyMapper: Mappers.KnowledgeSource,
-    },
-    201: {
-      bodyMapper: Mappers.KnowledgeSource,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.knowledgeSource,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.sourceName],
-  headerParameters: [
-    Parameters.contentType,
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-    Parameters.prefer,
-  ],
-  mediaType: "json",
-  serializer,
-};
-const deleteOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgesources('{sourceName}')",
-  httpMethod: "DELETE",
-  responses: {
-    204: {},
-    404: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.sourceName],
-  headerParameters: [
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-  ],
-  serializer,
-};
-const getOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgesources('{sourceName}')",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.KnowledgeSource,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.sourceName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const listOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgesources",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.ListKnowledgeSourcesResult,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const createOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgesources",
-  httpMethod: "POST",
-  responses: {
-    201: {
-      bodyMapper: Mappers.KnowledgeSource,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.knowledgeSource,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.contentType, Parameters.accept],
-  mediaType: "json",
-  serializer,
-};
-const getStatusOperationSpec: coreClient.OperationSpec = {
-  path: "/knowledgesources('{sourceName}')/status",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.KnowledgeSourceStatus,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.sourceName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
diff --git a/sdk/search/search-documents/src/generated/service/operations/skillsets.ts b/sdk/search/search-documents/src/generated/service/operations/skillsets.ts
deleted file mode 100644
index 770caf4c4473..000000000000
--- a/sdk/search/search-documents/src/generated/service/operations/skillsets.ts
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import { Skillsets } from "../operationsInterfaces/index.js";
-import * as coreClient from "@azure/core-client";
-import * as Mappers from "../models/mappers.js";
-import * as Parameters from "../models/parameters.js";
-import { SearchServiceClient } from "../searchServiceClient.js";
-import {
-  SearchIndexerSkillset,
-  SkillsetsCreateOrUpdateOptionalParams,
-  SkillsetsCreateOrUpdateResponse,
-  SkillsetsDeleteOptionalParams,
-  SkillsetsGetOptionalParams,
-  SkillsetsGetResponse,
-  SkillsetsListOptionalParams,
-  SkillsetsListResponse,
-  SkillsetsCreateOptionalParams,
-  SkillsetsCreateResponse,
-  SkillNames,
-  SkillsetsResetSkillsOptionalParams,
-} from "../models/index.js";
-
-/** Class containing Skillsets operations. */
-export class SkillsetsImpl implements Skillsets {
-  private readonly client: SearchServiceClient;
-
-  /**
-   * Initializes a new instance of the Skillsets class.
-   * @param client Reference to the service client
-   */
-  constructor(client: SearchServiceClient) {
-    this.client = client;
-  }
-
-  /**
-   * Creates a new skillset in a search service or updates the skillset if it already exists.
-   * @param skillsetName The name of the skillset to create or update.
-   * @param skillset The skillset containing one or more skills to create or update in a search service.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    skillsetName: string,
-    skillset: SearchIndexerSkillset,
-    options?: SkillsetsCreateOrUpdateOptionalParams,
-  ): Promise<SkillsetsCreateOrUpdateResponse> {
-    return this.client.sendOperationRequest(
-      { skillsetName, skillset, options },
-      createOrUpdateOperationSpec,
-    );
-  }
-
-  /**
-   * Deletes a skillset in a search service.
-   * @param skillsetName The name of the skillset to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    skillsetName: string,
-    options?: SkillsetsDeleteOptionalParams,
-  ): Promise<void> {
-    return this.client.sendOperationRequest(
-      { skillsetName, options },
-      deleteOperationSpec,
-    );
-  }
-
-  /**
-   * Retrieves a skillset in a search service.
-   * @param skillsetName The name of the skillset to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    skillsetName: string,
-    options?: SkillsetsGetOptionalParams,
-  ): Promise<SkillsetsGetResponse> {
-    return this.client.sendOperationRequest(
-      { skillsetName, options },
-      getOperationSpec,
-    );
-  }
-
-  /**
-   * Lists all skillsets in a search service.
-   * @param options The options parameters.
-   */
-  list(options?: SkillsetsListOptionalParams): Promise<SkillsetsListResponse> {
-    return this.client.sendOperationRequest({ options }, listOperationSpec);
-  }
-
-  /**
-   * Creates a new skillset in a search service.
-   * @param skillset The skillset containing one or more skills to create in a search service.
-   * @param options The options parameters.
-   */
-  create(
-    skillset: SearchIndexerSkillset,
-    options?: SkillsetsCreateOptionalParams,
-  ): Promise<SkillsetsCreateResponse> {
-    return this.client.sendOperationRequest(
-      { skillset, options },
-      createOperationSpec,
-    );
-  }
-
-  /**
-   * Resets an existing skillset in a search service.
-   * @param skillsetName The name of the skillset to reset.
-   * @param skillNames The names of skills to reset.
-   * @param options The options parameters.
-   */
-  resetSkills(
-    skillsetName: string,
-    skillNames: SkillNames,
-    options?: SkillsetsResetSkillsOptionalParams,
-  ): Promise<void> {
-    return this.client.sendOperationRequest(
-      { skillsetName, skillNames, options },
-      resetSkillsOperationSpec,
-    );
-  }
-}
-// Operation Specifications
-const serializer = coreClient.createSerializer(Mappers, /* isXml */ false);
-
-const createOrUpdateOperationSpec: coreClient.OperationSpec = {
-  path: "/skillsets('{skillsetName}')",
-  httpMethod: "PUT",
-  responses: {
-    200: {
-      bodyMapper: Mappers.SearchIndexerSkillset,
-    },
-    201: {
-      bodyMapper: Mappers.SearchIndexerSkillset,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.skillset,
-  queryParameters: [
-    Parameters.apiVersion,
-    Parameters.skipIndexerResetRequirementForCache,
-    Parameters.disableCacheReprocessingChangeDetection,
-  ],
-  urlParameters: [Parameters.endpoint, Parameters.skillsetName],
-  headerParameters: [
-    Parameters.contentType,
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-    Parameters.prefer,
-  ],
-  mediaType: "json",
-  serializer,
-};
-const deleteOperationSpec: coreClient.OperationSpec = {
-  path: "/skillsets('{skillsetName}')",
-  httpMethod: "DELETE",
-  responses: {
-    204: {},
-    404: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.skillsetName],
-  headerParameters: [
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-  ],
-  serializer,
-};
-const getOperationSpec: coreClient.OperationSpec = {
-  path: "/skillsets('{skillsetName}')",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.SearchIndexerSkillset,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.skillsetName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const listOperationSpec: coreClient.OperationSpec = {
-  path: "/skillsets",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.ListSkillsetsResult,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion, Parameters.select],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const createOperationSpec: coreClient.OperationSpec = {
-  path: "/skillsets",
-  httpMethod: "POST",
-  responses: {
-    201: {
-      bodyMapper: Mappers.SearchIndexerSkillset,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.skillset,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.contentType, Parameters.accept],
-  mediaType: "json",
-  serializer,
-};
-const resetSkillsOperationSpec: coreClient.OperationSpec = {
-  path: "/skillsets('{skillsetName}')/search.resetskills",
-  httpMethod: "POST",
-  responses: {
-    204: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.skillNames,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.skillsetName],
-  headerParameters: [Parameters.contentType, Parameters.accept],
-  mediaType: "json",
-  serializer,
-};
diff --git a/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts b/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts
deleted file mode 100644
index 8bd281f6aeda..000000000000
--- a/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import { SynonymMaps } from "../operationsInterfaces/index.js";
-import * as coreClient from "@azure/core-client";
-import * as Mappers from "../models/mappers.js";
-import * as Parameters from "../models/parameters.js";
-import { SearchServiceClient } from "../searchServiceClient.js";
-import {
-  SynonymMap,
-  SynonymMapsCreateOrUpdateOptionalParams,
-  SynonymMapsCreateOrUpdateResponse,
-  SynonymMapsDeleteOptionalParams,
-  SynonymMapsGetOptionalParams,
-  SynonymMapsGetResponse,
-  SynonymMapsListOptionalParams,
-  SynonymMapsListResponse,
-  SynonymMapsCreateOptionalParams,
-  SynonymMapsCreateResponse,
-} from "../models/index.js";
-
-/** Class containing SynonymMaps operations. */
-export class SynonymMapsImpl implements SynonymMaps {
-  private readonly client: SearchServiceClient;
-
-  /**
-   * Initializes a new instance of the SynonymMaps class.
-   * @param client Reference to the service client
-   */
-  constructor(client: SearchServiceClient) {
-    this.client = client;
-  }
-
-  /**
-   * Creates a new synonym map or updates a synonym map if it already exists.
-   * @param synonymMapName The name of the synonym map to create or update.
-   * @param synonymMap The definition of the synonym map to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    synonymMapName: string,
-    synonymMap: SynonymMap,
-    options?: SynonymMapsCreateOrUpdateOptionalParams,
-  ): Promise<SynonymMapsCreateOrUpdateResponse> {
-    return this.client.sendOperationRequest(
-      { synonymMapName, synonymMap, options },
-      createOrUpdateOperationSpec,
-    );
-  }
-
-  /**
-   * Deletes a synonym map.
-   * @param synonymMapName The name of the synonym map to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    synonymMapName: string,
-    options?: SynonymMapsDeleteOptionalParams,
-  ): Promise<void> {
-    return this.client.sendOperationRequest(
-      { synonymMapName, options },
-      deleteOperationSpec,
-    );
-  }
-
-  /**
-   * Retrieves a synonym map definition.
-   * @param synonymMapName The name of the synonym map to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    synonymMapName: string,
-    options?: SynonymMapsGetOptionalParams,
-  ): Promise<SynonymMapsGetResponse> {
-    return this.client.sendOperationRequest(
-      { synonymMapName, options },
-      getOperationSpec,
-    );
-  }
-
-  /**
-   * Lists all synonym maps available for a search service.
-   * @param options The options parameters.
-   */
-  list(
-    options?: SynonymMapsListOptionalParams,
-  ): Promise<SynonymMapsListResponse> {
-    return this.client.sendOperationRequest({ options }, listOperationSpec);
-  }
-
-  /**
-   * Creates a new synonym map.
-   * @param synonymMap The definition of the synonym map to create.
-   * @param options The options parameters.
-   */
-  create(
-    synonymMap: SynonymMap,
-    options?: SynonymMapsCreateOptionalParams,
-  ): Promise<SynonymMapsCreateResponse> {
-    return this.client.sendOperationRequest(
-      { synonymMap, options },
-      createOperationSpec,
-    );
-  }
-}
-// Operation Specifications
-const serializer = coreClient.createSerializer(Mappers, /* isXml */ false);
-
-const createOrUpdateOperationSpec: coreClient.OperationSpec = {
-  path: "/synonymmaps('{synonymMapName}')",
-  httpMethod: "PUT",
-  responses: {
-    200: {
-      bodyMapper: Mappers.SynonymMap,
-    },
-    201: {
-      bodyMapper: Mappers.SynonymMap,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.synonymMap,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.synonymMapName],
-  headerParameters: [
-    Parameters.contentType,
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-    Parameters.prefer,
-  ],
-  mediaType: "json",
-  serializer,
-};
-const deleteOperationSpec: coreClient.OperationSpec = {
-  path: "/synonymmaps('{synonymMapName}')",
-  httpMethod: "DELETE",
-  responses: {
-    204: {},
-    404: {},
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.synonymMapName],
-  headerParameters: [
-    Parameters.accept,
-    Parameters.ifMatch,
-    Parameters.ifNoneMatch,
-  ],
-  serializer,
-};
-const getOperationSpec: coreClient.OperationSpec = {
-  path: "/synonymmaps('{synonymMapName}')",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.SynonymMap,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint, Parameters.synonymMapName],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const listOperationSpec: coreClient.OperationSpec = {
-  path: "/synonymmaps",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.ListSynonymMapsResult,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion, Parameters.select],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const createOperationSpec: coreClient.OperationSpec = {
-  path: "/synonymmaps",
-  httpMethod: "POST",
-  responses: {
-    201: {
-      bodyMapper: Mappers.SynonymMap,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  requestBody: Parameters.synonymMap,
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.contentType, Parameters.accept],
-  mediaType: "json",
-  serializer,
-};
diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts
deleted file mode 100644
index 1a0751eb6ed4..000000000000
--- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import {
-  SearchAlias,
-  AliasesCreateOptionalParams,
-  AliasesCreateResponse,
-  AliasesListOptionalParams,
-  AliasesListResponse,
-  AliasesCreateOrUpdateOptionalParams,
-  AliasesCreateOrUpdateResponse,
-  AliasesDeleteOptionalParams,
-  AliasesGetOptionalParams,
-  AliasesGetResponse,
-} from "../models/index.js";
-
-/** Interface representing an Aliases. */
-export interface Aliases {
-  /**
-   * Creates a new search alias.
-   * @param alias The definition of the alias to create.
-   * @param options The options parameters.
-   */
-  create(
-    alias: SearchAlias,
-    options?: AliasesCreateOptionalParams,
-  ): Promise<AliasesCreateResponse>;
-  /**
-   * Lists all aliases available for a search service.
-   * @param options The options parameters.
-   */
-  list(options?: AliasesListOptionalParams): Promise<AliasesListResponse>;
-  /**
-   * Creates a new search alias or updates an alias if it already exists.
-   * @param aliasName The name of the alias to create or update.
-   * @param alias The definition of the alias to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    aliasName: string,
-    alias: SearchAlias,
-    options?: AliasesCreateOrUpdateOptionalParams,
-  ): Promise<AliasesCreateOrUpdateResponse>;
-  /**
-   * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no
-   * recovery option. The mapped index is untouched by this operation.
-   * @param aliasName The name of the alias to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    aliasName: string,
-    options?: AliasesDeleteOptionalParams,
-  ): Promise<void>;
-  /**
-   * Retrieves an alias definition.
-   * @param aliasName The name of the alias to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    aliasName: string,
-    options?: AliasesGetOptionalParams,
-  ): Promise<AliasesGetResponse>;
-}
diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts
deleted file mode 100644
index 36a165a3974f..000000000000
--- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import {
-  SearchIndexerDataSource,
-  DataSourcesCreateOrUpdateOptionalParams,
-  DataSourcesCreateOrUpdateResponse,
-  DataSourcesDeleteOptionalParams,
-  DataSourcesGetOptionalParams,
-  DataSourcesGetResponse,
-  DataSourcesListOptionalParams,
-  DataSourcesListResponse,
-  DataSourcesCreateOptionalParams,
-  DataSourcesCreateResponse,
-} from "../models/index.js";
-
-/** Interface representing a DataSources. */
-export interface DataSources {
-  /**
-   * Creates a new datasource or updates a datasource if it already exists.
-   * @param dataSourceName The name of the datasource to create or update.
-   * @param dataSource The definition of the datasource to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    dataSourceName: string,
-    dataSource: SearchIndexerDataSource,
-    options?: DataSourcesCreateOrUpdateOptionalParams,
-  ): Promise<DataSourcesCreateOrUpdateResponse>;
-  /**
-   * Deletes a datasource.
-   * @param dataSourceName The name of the datasource to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    dataSourceName: string,
-    options?: DataSourcesDeleteOptionalParams,
-  ): Promise<void>;
-  /**
-   * Retrieves a datasource definition.
-   * @param dataSourceName The name of the datasource to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    dataSourceName: string,
-    options?: DataSourcesGetOptionalParams,
-  ): Promise<DataSourcesGetResponse>;
-  /**
-   * Lists all datasources available for a search service.
-   * @param options The options parameters.
-   */
-  list(
-    options?: DataSourcesListOptionalParams,
-  ): Promise<DataSourcesListResponse>;
-  /**
-   * Creates a new datasource.
-   * @param dataSource The definition of the datasource to create.
-   * @param options The options parameters.
-   */
-  create(
-    dataSource: SearchIndexerDataSource,
-    options?: DataSourcesCreateOptionalParams,
-  ): Promise<DataSourcesCreateResponse>;
-}
diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts
deleted file mode 100644
index 7b86b827f244..000000000000
--- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-export * from "./knowledgeBases.js";
-export * from "./knowledgeSources.js";
-export * from "./dataSources.js";
-export * from "./indexers.js";
-export * from "./skillsets.js";
-export * from "./synonymMaps.js";
-export * from "./indexes.js";
-export * from "./aliases.js";
diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts
deleted file mode 100644
index 3a93b673c616..000000000000
--- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import {
-  IndexersResetOptionalParams,
-  IndexersResetDocsOptionalParams,
-  IndexerResyncBody,
-  IndexersResyncOptionalParams,
-  IndexersRunOptionalParams,
-  SearchIndexer,
-  IndexersCreateOrUpdateOptionalParams,
-  IndexersCreateOrUpdateResponse,
-  IndexersDeleteOptionalParams,
-  IndexersGetOptionalParams,
-  IndexersGetResponse,
-  IndexersListOptionalParams,
-  IndexersListResponse,
-  IndexersCreateOptionalParams,
-  IndexersCreateResponse,
-  IndexersGetStatusOptionalParams,
-  IndexersGetStatusResponse,
-} from "../models/index.js";
-
-/** Interface representing an Indexers. */
-export interface Indexers {
-  /**
-   * Resets the change tracking state associated with an indexer.
-   * @param indexerName The name of the indexer to reset.
-   * @param options The options parameters.
-   */
-  reset(
-    indexerName: string,
-    options?: IndexersResetOptionalParams,
-  ): Promise<void>;
-  /**
-   * Resets specific documents in the datasource to be selectively re-ingested by the indexer.
-   * @param indexerName The name of the indexer to reset documents for.
-   * @param options The options parameters.
-   */
-  resetDocs(
-    indexerName: string,
-    options?: IndexersResetDocsOptionalParams,
-  ): Promise<void>;
-  /**
-   * Resyncs selective options from the datasource to be re-ingested by the indexer.
-   * @param indexerName The name of the indexer to resync for.
-   * @param indexerResync
-   * @param options The options parameters.
-   */
-  resync(
-    indexerName: string,
-    indexerResync: IndexerResyncBody,
-    options?: IndexersResyncOptionalParams,
-  ): Promise<void>;
-  /**
-   * Runs an indexer on-demand.
-   * @param indexerName The name of the indexer to run.
-   * @param options The options parameters.
-   */
-  run(indexerName: string, options?: IndexersRunOptionalParams): Promise<void>;
-  /**
-   * Creates a new indexer or updates an indexer if it already exists.
-   * @param indexerName The name of the indexer to create or update.
-   * @param indexer The definition of the indexer to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    indexerName: string,
-    indexer: SearchIndexer,
-    options?: IndexersCreateOrUpdateOptionalParams,
-  ): Promise<IndexersCreateOrUpdateResponse>;
-  /**
-   * Deletes an indexer.
-   * @param indexerName The name of the indexer to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    indexerName: string,
-    options?: IndexersDeleteOptionalParams,
-  ): Promise<void>;
-  /**
-   * Retrieves an indexer definition.
-   * @param indexerName The name of the indexer to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    indexerName: string,
-    options?: IndexersGetOptionalParams,
-  ): Promise<IndexersGetResponse>;
-  /**
-   * Lists all indexers available for a search service.
-   * @param options The options parameters.
-   */
-  list(options?: IndexersListOptionalParams): Promise<IndexersListResponse>;
-  /**
-   * Creates a new indexer.
-   * @param indexer The definition of the indexer to create.
-   * @param options The options parameters.
-   */
-  create(
-    indexer: SearchIndexer,
-    options?: IndexersCreateOptionalParams,
-  ): Promise<IndexersCreateResponse>;
-  /**
-   * Returns the current status and execution history of an indexer.
-   * @param indexerName The name of the indexer for which to retrieve status.
-   * @param options The options parameters.
-   */
-  getStatus(
-    indexerName: string,
-    options?: IndexersGetStatusOptionalParams,
-  ): Promise<IndexersGetStatusResponse>;
-}
diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts
deleted file mode 100644
index 97c64eb3214c..000000000000
--- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import {
-  SearchIndex,
-  IndexesCreateOptionalParams,
-  IndexesCreateResponse,
-  IndexesListOptionalParams,
-  IndexesListResponse,
-  IndexesCreateOrUpdateOptionalParams,
-  IndexesCreateOrUpdateResponse,
-  IndexesDeleteOptionalParams,
-  IndexesGetOptionalParams,
-  IndexesGetResponse,
-  IndexesGetStatisticsOptionalParams,
-  IndexesGetStatisticsResponse,
-  AnalyzeRequest,
-  IndexesAnalyzeOptionalParams,
-  IndexesAnalyzeResponse,
-} from "../models/index.js";
-
-/** Interface representing an Indexes. */
-export interface Indexes {
-  /**
-   * Creates a new search index.
-   * @param index The definition of the index to create.
-   * @param options The options parameters.
-   */
-  create(
-    index: SearchIndex,
-    options?: IndexesCreateOptionalParams,
-  ): Promise<IndexesCreateResponse>;
-  /**
-   * Lists all indexes available for a search service.
-   * @param options The options parameters.
-   */
-  list(options?: IndexesListOptionalParams): Promise<IndexesListResponse>;
-  /**
-   * Creates a new search index or updates an index if it already exists.
-   * @param indexName The name of the index to create or update.
-   * @param index The definition of the index to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    indexName: string,
-    index: SearchIndex,
-    options?: IndexesCreateOrUpdateOptionalParams,
-  ): Promise<IndexesCreateOrUpdateResponse>;
-  /**
-   * Deletes a search index and all the documents it contains. This operation is permanent, with no
-   * recovery option. Make sure you have a master copy of your index definition, data ingestion code, and
-   * a backup of the primary data source in case you need to re-build the index.
-   * @param indexName The name of the index to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    indexName: string,
-    options?: IndexesDeleteOptionalParams,
-  ): Promise<void>;
-  /**
-   * Retrieves an index definition.
-   * @param indexName The name of the index to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    indexName: string,
-    options?: IndexesGetOptionalParams,
-  ): Promise<IndexesGetResponse>;
-  /**
-   * Returns statistics for the given index, including a document count and storage usage.
-   * @param indexName The name of the index for which to retrieve statistics.
-   * @param options The options parameters.
-   */
-  getStatistics(
-    indexName: string,
-    options?: IndexesGetStatisticsOptionalParams,
-  ): Promise<IndexesGetStatisticsResponse>;
-  /**
-   * Shows how an analyzer breaks text into tokens.
-   * @param indexName The name of the index for which to test an analyzer.
-   * @param request The text and analyzer or analysis components to test.
-   * @param options The options parameters.
-   */
-  analyze(
-    indexName: string,
-    request: AnalyzeRequest,
-    options?: IndexesAnalyzeOptionalParams,
-  ): Promise<IndexesAnalyzeResponse>;
-}
diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/knowledgeBases.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/knowledgeBases.ts
deleted file mode 100644
index e13947b43db8..000000000000
--- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/knowledgeBases.ts
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import {
-  KnowledgeBase,
-  KnowledgeBasesCreateOrUpdateOptionalParams,
-  KnowledgeBasesCreateOrUpdateResponse,
-  KnowledgeBasesDeleteOptionalParams,
-  KnowledgeBasesGetOptionalParams,
-  KnowledgeBasesGetResponse,
-  KnowledgeBasesListOptionalParams,
-  KnowledgeBasesListResponse,
-  KnowledgeBasesCreateOptionalParams,
-  KnowledgeBasesCreateResponse,
-} from "../models/index.js";
-
-/** Interface representing a KnowledgeBases. */
-export interface KnowledgeBases {
-  /**
-   * Creates a new knowledge base or updates a knowledge base if it already exists.
-   * @param knowledgeBaseName The name of the knowledge base to create or update.
-   * @param knowledgeBase The definition of the knowledge base to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    knowledgeBaseName: string,
-    knowledgeBase: KnowledgeBase,
-    options?: KnowledgeBasesCreateOrUpdateOptionalParams,
-  ): Promise<KnowledgeBasesCreateOrUpdateResponse>;
-  /**
-   * Deletes an existing knowledge base.
-   * @param knowledgeBaseName The name of the knowledge base to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    knowledgeBaseName: string,
-    options?: KnowledgeBasesDeleteOptionalParams,
-  ): Promise<void>;
-  /**
-   * Retrieves a knowledge base definition.
-   * @param knowledgeBaseName The name of the knowledge base to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    knowledgeBaseName: string,
-    options?: KnowledgeBasesGetOptionalParams,
-  ): Promise<KnowledgeBasesGetResponse>;
-  /**
-   * Lists all knowledge bases available for a search service.
-   * @param options The options parameters.
-   */
-  list(
-    options?: KnowledgeBasesListOptionalParams,
-  ): Promise<KnowledgeBasesListResponse>;
-  /**
-   * Creates a new knowledge base.
-   * @param knowledgeBase The definition of the knowledge base to create.
-   * @param options The options parameters.
-   */
-  create(
-    knowledgeBase: KnowledgeBase,
-    options?: KnowledgeBasesCreateOptionalParams,
-  ): Promise<KnowledgeBasesCreateResponse>;
-}
diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/knowledgeSources.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/knowledgeSources.ts
deleted file mode 100644
index cb8ed0e6a647..000000000000
--- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/knowledgeSources.ts
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import {
-  KnowledgeSourceUnion,
-  KnowledgeSourcesCreateOrUpdateOptionalParams,
-  KnowledgeSourcesCreateOrUpdateResponse,
-  KnowledgeSourcesDeleteOptionalParams,
-  KnowledgeSourcesGetOptionalParams,
-  KnowledgeSourcesGetResponse,
-  KnowledgeSourcesListOptionalParams,
-  KnowledgeSourcesListResponse,
-  KnowledgeSourcesCreateOptionalParams,
-  KnowledgeSourcesCreateResponse,
-  KnowledgeSourcesGetStatusOptionalParams,
-  KnowledgeSourcesGetStatusResponse,
-} from "../models/index.js";
-
-/** Interface representing a KnowledgeSources. */
-export interface KnowledgeSources {
-  /**
-   * Creates a new knowledge source or updates a knowledge source if it already exists.
-   * @param sourceName The name of the knowledge source to create or update.
-   * @param knowledgeSource The definition of the knowledge source to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    sourceName: string,
-    knowledgeSource: KnowledgeSourceUnion,
-    options?: KnowledgeSourcesCreateOrUpdateOptionalParams,
-  ): Promise<KnowledgeSourcesCreateOrUpdateResponse>;
-  /**
-   * Deletes an existing knowledge source.
-   * @param sourceName The name of the knowledge source to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    sourceName: string,
-    options?: KnowledgeSourcesDeleteOptionalParams,
-  ): Promise<void>;
-  /**
-   * Retrieves a knowledge source definition.
-   * @param sourceName The name of the knowledge source to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    sourceName: string,
-    options?: KnowledgeSourcesGetOptionalParams,
-  ): Promise<KnowledgeSourcesGetResponse>;
-  /**
-   * Lists all knowledge sources available for a search service.
-   * @param options The options parameters.
-   */
-  list(
-    options?: KnowledgeSourcesListOptionalParams,
-  ): Promise<KnowledgeSourcesListResponse>;
-  /**
-   * Creates a new knowledge source.
-   * @param knowledgeSource The definition of the knowledge source to create.
-   * @param options The options parameters.
-   */
-  create(
-    knowledgeSource: KnowledgeSourceUnion,
-    options?: KnowledgeSourcesCreateOptionalParams,
-  ): Promise<KnowledgeSourcesCreateResponse>;
-  /**
-   * Returns the current status and synchronization history of a knowledge source.
-   * @param sourceName The name of the knowledge source for which to retrieve status.
-   * @param options The options parameters.
-   */
-  getStatus(
-    sourceName: string,
-    options?: KnowledgeSourcesGetStatusOptionalParams,
-  ): Promise<KnowledgeSourcesGetStatusResponse>;
-}
diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts
deleted file mode 100644
index 2287fd215c5c..000000000000
--- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import {
-  SearchIndexerSkillset,
-  SkillsetsCreateOrUpdateOptionalParams,
-  SkillsetsCreateOrUpdateResponse,
-  SkillsetsDeleteOptionalParams,
-  SkillsetsGetOptionalParams,
-  SkillsetsGetResponse,
-  SkillsetsListOptionalParams,
-  SkillsetsListResponse,
-  SkillsetsCreateOptionalParams,
-  SkillsetsCreateResponse,
-  SkillNames,
-  SkillsetsResetSkillsOptionalParams,
-} from "../models/index.js";
-
-/** Interface representing a Skillsets. */
-export interface Skillsets {
-  /**
-   * Creates a new skillset in a search service or updates the skillset if it already exists.
-   * @param skillsetName The name of the skillset to create or update.
-   * @param skillset The skillset containing one or more skills to create or update in a search service.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    skillsetName: string,
-    skillset: SearchIndexerSkillset,
-    options?: SkillsetsCreateOrUpdateOptionalParams,
-  ): Promise<SkillsetsCreateOrUpdateResponse>;
-  /**
-   * Deletes a skillset in a search service.
-   * @param skillsetName The name of the skillset to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    skillsetName: string,
-    options?: SkillsetsDeleteOptionalParams,
-  ): Promise<void>;
-  /**
-   * Retrieves a skillset in a search service.
-   * @param skillsetName The name of the skillset to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    skillsetName: string,
-    options?: SkillsetsGetOptionalParams,
-  ): Promise<SkillsetsGetResponse>;
-  /**
-   * List all skillsets in a search service.
-   * @param options The options parameters.
-   */
-  list(options?: SkillsetsListOptionalParams): Promise<SkillsetsListResponse>;
-  /**
-   * Creates a new skillset in a search service.
-   * @param skillset The skillset containing one or more skills to create in a search service.
-   * @param options The options parameters.
-   */
-  create(
-    skillset: SearchIndexerSkillset,
-    options?: SkillsetsCreateOptionalParams,
-  ): Promise<SkillsetsCreateResponse>;
-  /**
-   * Reset an existing skillset in a search service.
-   * @param skillsetName The name of the skillset to reset.
-   * @param skillNames The names of skills to reset.
-   * @param options The options parameters.
-   */
-  resetSkills(
-    skillsetName: string,
-    skillNames: SkillNames,
-    options?: SkillsetsResetSkillsOptionalParams,
-  ): Promise<void>;
-}
diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts
deleted file mode 100644
index 12eefed6a043..000000000000
--- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import {
-  SynonymMap,
-  SynonymMapsCreateOrUpdateOptionalParams,
-  SynonymMapsCreateOrUpdateResponse,
-  SynonymMapsDeleteOptionalParams,
-  SynonymMapsGetOptionalParams,
-  SynonymMapsGetResponse,
-  SynonymMapsListOptionalParams,
-  SynonymMapsListResponse,
-  SynonymMapsCreateOptionalParams,
-  SynonymMapsCreateResponse,
-} from "../models/index.js";
-
-/** Interface representing a SynonymMaps. */
-export interface SynonymMaps {
-  /**
-   * Creates a new synonym map or updates a synonym map if it already exists.
-   * @param synonymMapName The name of the synonym map to create or update.
-   * @param synonymMap The definition of the synonym map to create or update.
-   * @param options The options parameters.
-   */
-  createOrUpdate(
-    synonymMapName: string,
-    synonymMap: SynonymMap,
-    options?: SynonymMapsCreateOrUpdateOptionalParams,
-  ): Promise<SynonymMapsCreateOrUpdateResponse>;
-  /**
-   * Deletes a synonym map.
-   * @param synonymMapName The name of the synonym map to delete.
-   * @param options The options parameters.
-   */
-  delete(
-    synonymMapName: string,
-    options?: SynonymMapsDeleteOptionalParams,
-  ): Promise<void>;
-  /**
-   * Retrieves a synonym map definition.
-   * @param synonymMapName The name of the synonym map to retrieve.
-   * @param options The options parameters.
-   */
-  get(
-    synonymMapName: string,
-    options?: SynonymMapsGetOptionalParams,
-  ): Promise<SynonymMapsGetResponse>;
-  /**
-   * Lists all synonym maps available for a search service.
-   * @param options The options parameters.
-   */
-  list(
-    options?: SynonymMapsListOptionalParams,
-  ): Promise<SynonymMapsListResponse>;
-  /**
-   * Creates a new synonym map.
-   * @param synonymMap The definition of the synonym map to create.
-   * @param options The options parameters.
-   */
-  create(
-    synonymMap: SynonymMap,
-    options?: SynonymMapsCreateOptionalParams,
-  ): Promise<SynonymMapsCreateResponse>;
-}
diff --git a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts b/sdk/search/search-documents/src/generated/service/searchServiceClient.ts
deleted file mode 100644
index 045f8b840439..000000000000
--- a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT License.
- *
- * Code generated by Microsoft (R) AutoRest Code Generator.
- * Changes may cause incorrect behavior and will be lost if the code is regenerated.
- */
-
-import * as coreClient from "@azure/core-client";
-import * as coreHttpCompat from "@azure/core-http-compat";
-import {
-  PipelineRequest,
-  PipelineResponse,
-  SendRequest,
-} from "@azure/core-rest-pipeline";
-import {
-  KnowledgeBasesImpl,
-  KnowledgeSourcesImpl,
-  DataSourcesImpl,
-  IndexersImpl,
-  SkillsetsImpl,
-  SynonymMapsImpl,
-  IndexesImpl,
-  AliasesImpl,
-} from "./operations/index.js";
-import {
-  KnowledgeBases,
-  KnowledgeSources,
-  DataSources,
-  Indexers,
-  Skillsets,
-  SynonymMaps,
-  Indexes,
-  Aliases,
-} from "./operationsInterfaces/index.js";
-import * as Parameters from "./models/parameters.js";
-import * as Mappers from "./models/mappers.js";
-import {
-  ApiVersion20251101Preview,
-  SearchServiceClientOptionalParams,
-  GetServiceStatisticsOptionalParams,
-  GetServiceStatisticsResponse,
-  GetIndexStatsSummaryOptionalParams,
-  GetIndexStatsSummaryResponse,
-} from "./models/index.js";
-
-/** @internal */
-export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient {
-  endpoint: string;
-  apiVersion: ApiVersion20251101Preview;
-
-  /**
-   * Initializes a new instance of the SearchServiceClient class.
-   * @param endpoint The endpoint URL of the search service.
-   * @param apiVersion Api Version
-   * @param options The parameter options
-   */
-  constructor(
-    endpoint: string,
-    apiVersion: ApiVersion20251101Preview,
-    options?: SearchServiceClientOptionalParams,
-  ) {
-    if (endpoint === undefined) {
-      throw new Error("'endpoint' cannot be null");
-    }
-    if (apiVersion === undefined) {
-      throw new Error("'apiVersion' cannot be null");
-    }
-
-    // Initializing default values for options
-    if (!options) {
-      options = {};
-    }
-    const defaults: SearchServiceClientOptionalParams = {
-      requestContentType: "application/json; charset=utf-8",
-    };
-
-    const packageDetails = `azsdk-js-search-documents/12.3.0-beta.1`;
-    const userAgentPrefix =
-      options.userAgentOptions && options.userAgentOptions.userAgentPrefix
-        ? `${options.userAgentOptions.userAgentPrefix} ${packageDetails}`
-        : `${packageDetails}`;
-
-    const optionsWithDefaults = {
-      ...defaults,
-      ...options,
-      userAgentOptions: {
-        userAgentPrefix,
-      },
-      endpoint: options.endpoint ?? options.baseUri ?? "{endpoint}",
-    };
-    super(optionsWithDefaults);
-    // Parameter assignments
-    this.endpoint = endpoint;
-    this.apiVersion = apiVersion;
-    this.knowledgeBases = new KnowledgeBasesImpl(this);
-    this.knowledgeSources = new KnowledgeSourcesImpl(this);
-    this.dataSources = new DataSourcesImpl(this);
-    this.indexers = new IndexersImpl(this);
-    this.skillsets = new SkillsetsImpl(this);
-    this.synonymMaps = new SynonymMapsImpl(this);
-    this.indexes = new IndexesImpl(this);
-    this.aliases = new AliasesImpl(this);
-    this.addCustomApiVersionPolicy(apiVersion);
-  }
-
-  /** A function that adds a policy that sets the api-version (or equivalent) to reflect the library version. */
-  private addCustomApiVersionPolicy(apiVersion?: string) {
-    if (!apiVersion) {
-      return;
-    }
-    const apiVersionPolicy = {
-      name: "CustomApiVersionPolicy",
-      async sendRequest(
-        request: PipelineRequest,
-        next: SendRequest,
-      ): Promise<PipelineResponse> {
-        const param = request.url.split("?");
-        if (param.length > 1) {
-          const newParams = param[1].split("&").map((item) => {
-            if (item.indexOf("api-version") > -1) {
-              return "api-version=" + apiVersion;
-            } else {
-              return item;
-            }
-          });
-          request.url = param[0] + "?" + newParams.join("&");
-        }
-        return next(request);
-      },
-    };
-    this.pipeline.addPolicy(apiVersionPolicy);
-  }
-
-  /**
-   * Gets service level statistics for a search service.
-   * @param options The options parameters.
-   */
-  getServiceStatistics(
-    options?: GetServiceStatisticsOptionalParams,
-  ): Promise<GetServiceStatisticsResponse> {
-    return this.sendOperationRequest(
-      { options },
-      getServiceStatisticsOperationSpec,
-    );
-  }
-
-  /**
-   * Retrieves a summary of statistics for all indexes in the search service.
-   * @param options The options parameters.
-   */
-  getIndexStatsSummary(
-    options?: GetIndexStatsSummaryOptionalParams,
-  ): Promise<GetIndexStatsSummaryResponse> {
-    return this.sendOperationRequest(
-      { options },
-      getIndexStatsSummaryOperationSpec,
-    );
-  }
-
-  knowledgeBases: KnowledgeBases;
-  knowledgeSources: KnowledgeSources;
-  dataSources: DataSources;
-  indexers: Indexers;
-  skillsets: Skillsets;
-  synonymMaps: SynonymMaps;
-  indexes: Indexes;
-  aliases: Aliases;
-}
-// Operation Specifications
-const serializer = coreClient.createSerializer(Mappers, /* isXml */ false);
-
-const getServiceStatisticsOperationSpec: coreClient.OperationSpec = {
-  path: "/servicestats",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.ServiceStatistics,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
-const getIndexStatsSummaryOperationSpec: coreClient.OperationSpec = {
-  path: "/indexstats",
-  httpMethod: "GET",
-  responses: {
-    200: {
-      bodyMapper: Mappers.ListIndexStatsSummary,
-    },
-    default: {
-      bodyMapper: Mappers.ErrorResponse,
-    },
-  },
-  queryParameters: [Parameters.apiVersion],
-  urlParameters: [Parameters.endpoint],
-  headerParameters: [Parameters.accept],
-  serializer,
-};
diff --git a/sdk/search/search-documents/src/index.ts b/sdk/search/search-documents/src/index.ts
index 8eae99ceefeb..b31b1fec3280 100644
--- a/sdk/search/search-documents/src/index.ts
+++ b/sdk/search/search-documents/src/index.ts
@@ -43,13 +43,13 @@ export {
   SingleVectorFieldResult,
   TextResult,
   VectorsDebugInfo,
-} from "./generated/data/models/index.js";
+} from "./models/azure/search/documents/index.js";
 export {
   AzureBlobKnowledgeSourceParams,
   KnowledgeBaseActivityRecord as BaseKnowledgeBaseActivityRecord,
   KnowledgeBaseMessageContent as BaseKnowledgeBaseMessageContent,
   KnowledgeBaseReference as BaseKnowledgeBaseReference,
-  KnowledgeBaseRetrievalActivityRecord as BaseKnowledgeBaseRetrievalActivityRecord,
+  // KnowledgeBaseRetrievalActivityRecord as BaseKnowledgeBaseRetrievalActivityRecord,
   KnowledgeRetrievalIntentUnion as BaseKnowledgeRetrievalIntent,
   KnowledgeRetrievalOutputMode as BaseKnowledgeRetrievalOutputMode,
   KnowledgeRetrievalReasoningEffortUnion,
@@ -59,54 +59,54 @@ export {
   IndexedSharePointKnowledgeSourceParams,
   KnowledgeBaseActivityRecordUnion as KnowledgeBaseActivityRecord,
   KnowledgeBaseAgenticReasoningActivityRecord,
-  KnowledgeBaseAzureBlobActivityArguments,
-  KnowledgeBaseAzureBlobActivityRecord,
+  // KnowledgeBaseAzureBlobActivityArguments,
+  // KnowledgeBaseAzureBlobActivityRecord,
   KnowledgeBaseAzureBlobReference,
   KnowledgeBaseErrorAdditionalInfo,
   KnowledgeBaseErrorDetail,
-  KnowledgeBaseIndexedOneLakeActivityArguments,
-  KnowledgeBaseIndexedOneLakeActivityRecord,
+  // KnowledgeBaseIndexedOneLakeActivityArguments,
+  // KnowledgeBaseIndexedOneLakeActivityRecord,
   KnowledgeBaseIndexedOneLakeReference,
-  KnowledgeBaseIndexedSharePointActivityArguments,
-  KnowledgeBaseIndexedSharePointActivityRecord,
+  // KnowledgeBaseIndexedSharePointActivityArguments,
+  // KnowledgeBaseIndexedSharePointActivityRecord,
   KnowledgeBaseIndexedSharePointReference,
   KnowledgeBaseMessage,
   KnowledgeBaseMessageContentUnion as KnowledgeBaseMessageContent,
   KnowledgeBaseMessageImageContent,
-  KnowledgeBaseMessageImageContentImage,
+  // KnowledgeBaseMessageImageContentImage,
   KnowledgeBaseMessageTextContent,
   KnowledgeBaseModelAnswerSynthesisActivityRecord,
   KnowledgeBaseModelQueryPlanningActivityRecord,
   KnowledgeBaseReferenceUnion
as KnowledgeBaseReference, - KnowledgeBaseRemoteSharePointActivityArguments, - KnowledgeBaseRemoteSharePointActivityRecord, + // KnowledgeBaseRemoteSharePointActivityArguments, + // KnowledgeBaseRemoteSharePointActivityRecord, KnowledgeBaseRemoteSharePointReference, - KnowledgeBaseRetrievalActivityRecordUnion as KnowledgeBaseRetrievalActivityRecord, + // KnowledgeBaseRetrievalActivityRecordUnion as KnowledgeBaseRetrievalActivityRecord, KnowledgeBaseRetrievalRequest, KnowledgeBaseRetrievalResponse, - KnowledgeBaseSearchIndexActivityArguments, - KnowledgeBaseSearchIndexActivityRecord, - SearchIndexFieldReference as KnowledgeBaseSearchIndexFieldReference, + // KnowledgeBaseSearchIndexActivityArguments, + // KnowledgeBaseSearchIndexActivityRecord, + // SearchIndexFieldReference as KnowledgeBaseSearchIndexFieldReference, KnowledgeBaseSearchIndexReference, - KnowledgeBaseWebActivityArguments, - KnowledgeBaseWebActivityRecord, + // KnowledgeBaseWebActivityArguments, + // KnowledgeBaseWebActivityRecord, KnowledgeBaseWebReference, KnowledgeRetrievalIntent, KnowledgeRetrievalLowReasoningEffort, KnowledgeRetrievalMediumReasoningEffort, KnowledgeRetrievalMinimalReasoningEffort, KnowledgeRetrievalSemanticIntent, - KnowledgeSourceKind, + // KnowledgeSourceKind, KnowledgeSourceParamsUnion as KnowledgeSourceParams, - KnownKnowledgeSourceKind, + // KnownKnowledgeSourceKind, RemoteSharePointKnowledgeSourceParams, SearchIndexKnowledgeSourceParams, SharePointSensitivityLabelInfo, WebKnowledgeSourceParams, -} from "./generated/knowledgeBase/models/index.js"; +} from "./models/azure/search/documents/knowledgeBase/index.js"; export { AIFoundryModelCatalogName, - AIServices, + // AIServices, AIServicesAccountKey, AnalyzedTokenInfo, AnalyzeResult, @@ -120,7 +120,7 @@ export { DataChangeDetectionPolicy as BaseDataChangeDetectionPolicy, DataDeletionDetectionPolicy as BaseDataDeletionDetectionPolicy, KnowledgeBaseModel as BaseKnowledgeBaseModel, - KnowledgeSourceVectorizer as BaseKnowledgeSourceVectorizer, + // KnowledgeSourceVectorizer as BaseKnowledgeSourceVectorizer, LexicalAnalyzer as BaseLexicalAnalyzer, LexicalNormalizer as BaseLexicalNormalizer, LexicalTokenizer as BaseLexicalTokenizer, @@ -134,7 +134,7 @@ export { CharFilterName, ChatCompletionExtraParametersBehavior, ChatCompletionResponseFormat, - ChatCompletionResponseFormatJsonSchemaProperties, + // ChatCompletionResponseFormatJsonSchemaProperties, ChatCompletionResponseFormatType, ChatCompletionSchema, CjkBigramTokenFilter, @@ -143,13 +143,13 @@ export { ClassicTokenizer, CognitiveServicesAccountKey, CommonGramTokenFilter, - CommonModelParameters, - CompletedSynchronizationState, + // CommonModelParameters, + // CompletedSynchronizationState, ConditionalSkill, - ContentUnderstandingSkill, - ContentUnderstandingSkillChunkingProperties, - ContentUnderstandingSkillChunkingUnit, - ContentUnderstandingSkillExtractionOptions, + // ContentUnderstandingSkill, + // ContentUnderstandingSkillChunkingProperties, + // ContentUnderstandingSkillChunkingUnit, + // ContentUnderstandingSkillExtractionOptions, CorsOptions, CustomEntity, CustomEntityAlias, @@ -174,18 +174,18 @@ export { FieldMappingFunction, FreshnessScoringFunction, FreshnessScoringParameters, - GetIndexStatsSummaryOptionalParams, - GetIndexStatsSummaryResponse, + // GetIndexStatsSummaryOptionalParams, + // GetIndexStatsSummaryResponse, HighWaterMarkChangeDetectionPolicy, - IndexedSharePointContainerName, + // IndexedSharePointContainerName, IndexerExecutionResult, IndexerExecutionStatus, 
IndexerExecutionStatusDetail, IndexerPermissionOption, IndexerResyncOption, - IndexerRuntime, - IndexersResyncOptionalParams, - IndexerState, + // IndexerRuntime, + // IndexersResyncOptionalParams, + // IndexerState, IndexerStatus, IndexingMode, IndexingSchedule, @@ -194,15 +194,15 @@ export { InputFieldMappingEntry, KeepTokenFilter, KeywordMarkerTokenFilter, - KnownKnowledgeRetrievalOutputMode, - KnowledgeRetrievalOutputMode, - KnowledgeRetrievalReasoningEffort, - KnowledgeSourceContentExtractionMode, - KnowledgeSourceIngestionPermissionOption, + // KnownKnowledgeRetrievalOutputMode, + // KnowledgeRetrievalOutputMode, + // KnowledgeRetrievalReasoningEffort, + // KnowledgeSourceContentExtractionMode, + // KnowledgeSourceIngestionPermissionOption, KnowledgeSourceReference, - KnowledgeSourceStatistics, - KnowledgeSourceStatus, - KnowledgeSourceSynchronizationStatus, + // KnowledgeSourceStatistics, + // KnowledgeSourceStatus, + // KnowledgeSourceSynchronizationStatus, KnownAIFoundryModelCatalogName, KnownAzureOpenAIModelName, KnownBlobIndexerDataToExtract, @@ -263,7 +263,7 @@ export { LexicalNormalizerName, LexicalTokenizerName, LimitTokenFilter, - ListIndexStatsSummary, + // ListIndexStatsSummary, LuceneStandardAnalyzer, MagnitudeScoringFunction, MagnitudeScoringParameters, @@ -279,7 +279,7 @@ export { NGramTokenizer, OcrLineEnding, OutputFieldMappingEntry, - PathHierarchyTokenizerV2 as PathHierarchyTokenizer, + PathHierarchyTokenizer, PatternCaptureTokenFilter, PatternReplaceCharFilter, PatternReplaceTokenFilter, @@ -287,7 +287,7 @@ export { PhoneticEncoder, PhoneticTokenFilter, RankingOrder, - RemoteSharePointKnowledgeSourceParameters, + // RemoteSharePointKnowledgeSourceParameters, RescoringOptions, ResourceCounter, ScalarQuantizationCompression, @@ -309,20 +309,20 @@ export { SearchIndexerLimits, SearchIndexerStatus, SearchIndexerWarning, - SearchIndexFieldReference, + // SearchIndexFieldReference, SearchIndexKnowledgeSourceParameters, SearchIndexPermissionFilterOption, - Suggester as SearchSuggester, + SearchSuggester, SemanticConfiguration, SemanticField, SemanticPrioritizedFields, SemanticSearch, SentimentSkillV3, - ServiceCounters, - ServiceLimits, + // ServiceCounters, + // ServiceLimits, ShaperSkill, ShingleTokenFilter, - Similarity, + SimilarityAlgorithmUnion as Similarity, SnowballTokenFilter, SnowballTokenFilterLanguage, SoftDeleteColumnDeletionDetectionPolicy, @@ -335,7 +335,7 @@ export { StopAnalyzer, StopwordsList, StopwordsTokenFilter, - SynchronizationState, + // SynchronizationState, SynonymTokenFilter, TagScoringFunction, TagScoringParameters, @@ -351,11 +351,11 @@ export { VectorSearchCompressionTarget, VectorSearchProfile, VectorSearchVectorizerKind, - WebKnowledgeSourceDomains, - WebKnowledgeSourceParameters, + // WebKnowledgeSourceDomains, + // WebKnowledgeSourceParameters, WordDelimiterTokenFilter, - WebKnowledgeSourceDomain, -} from "./generated/service/models/index.js"; + // WebKnowledgeSourceDomain, +} from "./models/azure/search/documents/indexes/index.js"; export { default as GeographyPoint } from "./geographyPoint.js"; export { IndexDocumentsBatch } from "./indexDocumentsBatch.js"; export { diff --git a/sdk/search/search-documents/src/indexModels.ts b/sdk/search/search-documents/src/indexModels.ts index a72aa170e608..4222bb68130c 100644 --- a/sdk/search/search-documents/src/indexModels.ts +++ b/sdk/search/search-documents/src/indexModels.ts @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
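The re-export renames in the hunks above (for example, `Suggester as SearchSuggester` becoming a direct `SearchSuggester` export, and `Similarity` becoming an alias of `SimilarityAlgorithmUnion`) keep the public surface stable while the types move out of `./generated/*`. A minimal sketch of a declaration that should keep compiling unchanged across this move; the suggester name and source fields are hypothetical:

```typescript
import type { SearchSuggester } from "@azure/search-documents";

// Hypothetical suggester definition; "analyzingInfixMatching" is the only
// search mode the service currently accepts for suggesters.
const suggester: SearchSuggester = {
  name: "sg",
  searchMode: "analyzingInfixMatching",
  sourceFields: ["hotelName", "description"],
};
```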
-import type { OperationOptions } from "@azure/core-client"; -import type { PagedAsyncIterableIterator } from "@azure/core-paging"; +import type { OperationOptions } from "@azure-rest/core-client"; +import type { PagedAsyncIterableIterator } from "./static-helpers/pagingHelpers.js"; import type { AutocompleteMode, DebugInfo, @@ -27,7 +27,7 @@ import type { SemanticFieldState, SemanticQueryRewritesResultType, VectorsDebugInfo, -} from "./generated/data/models/index.js"; +} from "./models/azure/search/documents/models.js"; import type GeographyPoint from "./geographyPoint.js"; /** diff --git a/sdk/search/search-documents/src/knowledgeBaseModels.ts b/sdk/search/search-documents/src/knowledgeBaseModels.ts index 579a5afad607..67f4870ea858 100644 --- a/sdk/search/search-documents/src/knowledgeBaseModels.ts +++ b/sdk/search/search-documents/src/knowledgeBaseModels.ts @@ -1,12 +1,12 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -import type { OperationOptions } from "@azure/core-client"; +import type { OperationOptions } from "@azure-rest/core-client"; import type { KnowledgeRetrievalOutputMode, - KnowledgeSourceReference, KnowledgeRetrievalReasoningEffort, -} from "./generated/service/index.js"; +} from "./models/azure/search/documents/knowledgeBase/models.js"; +import type { KnowledgeSourceReference } from "./models/azure/search/documents/indexes/models.js"; import type { KnowledgeBaseModel, SearchResourceEncryptionKey } from "./serviceModels.js"; export interface RetrieveKnowledgeOptions extends OperationOptions { diff --git a/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/index.ts b/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/index.ts new file mode 100644 index 000000000000..f19d348533fb --- /dev/null +++ b/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/index.ts @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { + createKnowledgeBaseRetrieval, + KnowledgeBaseRetrievalContext, + KnowledgeBaseRetrievalClientOptionalParams, +} from "./knowledgeBaseRetrievalContext.js"; +export { retrieve } from "./operations.js"; +export { RetrieveOptionalParams } from "./options.js"; diff --git a/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/knowledgeBaseRetrievalContext.ts b/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/knowledgeBaseRetrievalContext.ts new file mode 100644 index 000000000000..d173341d7f45 --- /dev/null +++ b/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/knowledgeBaseRetrievalContext.ts @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { logger } from "../../logger.js"; +import { KnownVersions } from "../../models/models.js"; +import { Client, ClientOptions, getClient } from "@azure-rest/core-client"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; + +export interface KnowledgeBaseRetrievalContext extends Client { + /** The API version to use for this operation. */ + /** Known values of {@link KnownVersions} that the service accepts. */ + apiVersion: string; +} + +/** Optional parameters for the client. */ +export interface KnowledgeBaseRetrievalClientOptionalParams extends ClientOptions { + /** The API version to use for this operation. */ + /** Known values of {@link KnownVersions} that the service accepts. 
*/ + apiVersion?: string; +} + +export function createKnowledgeBaseRetrieval( + endpointParam: string, + credential: KeyCredential | TokenCredential, + options: KnowledgeBaseRetrievalClientOptionalParams = {}, +): KnowledgeBaseRetrievalContext { + const endpointUrl = options.endpoint ?? String(endpointParam); + const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix; + const userAgentInfo = `azsdk-js-search-documents/12.3.0-beta.1`; + const userAgentPrefix = prefixFromOptions + ? `${prefixFromOptions} azsdk-js-api ${userAgentInfo}` + : `azsdk-js-api ${userAgentInfo}`; + const { apiVersion: _, ...updatedOptions } = { + ...options, + userAgentOptions: { userAgentPrefix }, + loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info }, + credentials: { + scopes: options.credentials?.scopes ?? ["https://search.azure.com/.default"], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", + }, + }; + const clientContext = getClient(endpointUrl, credential, updatedOptions); + clientContext.pipeline.removePolicy({ name: "ApiVersionPolicy" }); + const apiVersion = options.apiVersion ?? "2025-11-01-preview"; + clientContext.pipeline.addPolicy({ + name: "ClientApiVersionPolicy", + sendRequest: (req, next) => { + // Use the apiVersion defined in request url directly + // Append one if there is no apiVersion and we have one at client options + const url = new URL(req.url); + if (!url.searchParams.get("api-version")) { + req.url = `${req.url}${ + Array.from(url.searchParams.keys()).length > 0 ? "&" : "?" + }api-version=${apiVersion}`; + } + + return next(req); + }, + }); + return { ...clientContext, apiVersion } as KnowledgeBaseRetrievalContext; +} diff --git a/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/operations.ts b/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/operations.ts new file mode 100644 index 000000000000..15f6dc96ae27 --- /dev/null +++ b/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/operations.ts @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { KnowledgeBaseRetrievalContext as Client } from "./index.js"; +import { + KnowledgeBaseRetrievalRequest, + knowledgeBaseRetrievalRequestSerializer, + KnowledgeBaseRetrievalResponse, + knowledgeBaseRetrievalResponseDeserializer, +} from "../../models/azure/search/documents/knowledgeBase/models.js"; +import { expandUrlTemplate } from "../../static-helpers/urlTemplate.js"; +import { RetrieveOptionalParams } from "./options.js"; +import { + StreamableMethod, + PathUncheckedResponse, + createRestError, + operationOptionsToRequestParameters, +} from "@azure-rest/core-client"; + +export function _retrieveSend( + context: Client, + knowledgeBaseName: string, + retrievalRequest: KnowledgeBaseRetrievalRequest, + options: RetrieveOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/retrieve/{knowledgeBaseName}{?api%2Dversion}", + { + knowledgeBaseName: knowledgeBaseName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeBaseRetrievalRequestSerializer(retrievalRequest), + }); +} + +export async function _retrieveDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "206"]; + if (!expectedStatuses.includes(result.status)) { + throw createRestError(result); + } + + return knowledgeBaseRetrievalResponseDeserializer(result.body); +} + +/** KnowledgeBase retrieves relevant data from backing stores. */ +export async function retrieve( + context: Client, + knowledgeBaseName: string, + retrievalRequest: KnowledgeBaseRetrievalRequest, + options: RetrieveOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _retrieveSend(context, knowledgeBaseName, retrievalRequest, options); + return _retrieveDeserialize(result); +} diff --git a/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/options.ts b/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/options.ts new file mode 100644 index 000000000000..ae04f201e1fd --- /dev/null +++ b/sdk/search/search-documents/src/knowledgeBaseRetrieval/api/options.ts @@ -0,0 +1,12 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { OperationOptions } from "@azure-rest/core-client"; + +/** Optional parameters. */ +export interface RetrieveOptionalParams extends OperationOptions { + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} diff --git a/sdk/search/search-documents/src/knowledgeBaseRetrieval/index.ts b/sdk/search/search-documents/src/knowledgeBaseRetrieval/index.ts new file mode 100644 index 000000000000..53eccc42f981 --- /dev/null +++ b/sdk/search/search-documents/src/knowledgeBaseRetrieval/index.ts @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { KnowledgeBaseRetrievalClient } from "./knowledgeBaseRetrievalClient.js"; +export { + KnowledgeBaseRetrievalContext, + KnowledgeBaseRetrievalClientOptionalParams, + RetrieveOptionalParams, +} from "./api/index.js"; diff --git a/sdk/search/search-documents/src/knowledgeBaseRetrieval/knowledgeBaseRetrievalClient.ts b/sdk/search/search-documents/src/knowledgeBaseRetrieval/knowledgeBaseRetrievalClient.ts new file mode 100644 index 000000000000..e3be22b4ebd8 --- /dev/null +++ b/sdk/search/search-documents/src/knowledgeBaseRetrieval/knowledgeBaseRetrievalClient.ts @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+
+import {
+  createKnowledgeBaseRetrieval,
+  KnowledgeBaseRetrievalContext,
+  KnowledgeBaseRetrievalClientOptionalParams,
+} from "./api/index.js";
+import {
+  KnowledgeBaseRetrievalRequest,
+  KnowledgeBaseRetrievalResponse,
+} from "../models/azure/search/documents/knowledgeBase/models.js";
+import { retrieve } from "./api/operations.js";
+import { RetrieveOptionalParams } from "./api/options.js";
+import { KeyCredential, TokenCredential } from "@azure/core-auth";
+import { Pipeline } from "@azure/core-rest-pipeline";
+
+export { KnowledgeBaseRetrievalClientOptionalParams } from "./api/knowledgeBaseRetrievalContext.js";
+
+export class KnowledgeBaseRetrievalClient {
+  private _client: KnowledgeBaseRetrievalContext;
+  /** The pipeline used by this client to make requests */
+  public readonly pipeline: Pipeline;
+
+  constructor(
+    endpointParam: string,
+    credential: KeyCredential | TokenCredential,
+    options: KnowledgeBaseRetrievalClientOptionalParams = {},
+  ) {
+    const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix;
+    const userAgentPrefix = prefixFromOptions
+      ? `${prefixFromOptions} azsdk-js-client`
+      : `azsdk-js-client`;
+    this._client = createKnowledgeBaseRetrieval(endpointParam, credential, {
+      ...options,
+      userAgentOptions: { userAgentPrefix },
+    });
+    this.pipeline = this._client.pipeline;
+  }
+
+  /** KnowledgeBase retrieves relevant data from backing stores. */
+  retrieve(
+    knowledgeBaseName: string,
+    retrievalRequest: KnowledgeBaseRetrievalRequest,
+    options: RetrieveOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeBaseRetrievalResponse> {
+    return retrieve(this._client, knowledgeBaseName, retrievalRequest, options);
+  }
+}
diff --git a/sdk/search/search-documents/src/knowledgeRetrievalClient.ts b/sdk/search/search-documents/src/knowledgeRetrievalClient.ts
index b6af7c4e1a17..7a3fb03e0ab6 100644
--- a/sdk/search/search-documents/src/knowledgeRetrievalClient.ts
+++ b/sdk/search/search-documents/src/knowledgeRetrievalClient.ts
@@ -5,15 +5,17 @@
 import type { KeyCredential, TokenCredential } from "@azure/core-auth";
 import { isTokenCredential } from "@azure/core-auth";
-import type { InternalClientPipelineOptions } from "@azure/core-client";
-import type { ExtendedCommonClientOptions } from "@azure/core-http-compat";
 import type { Pipeline } from "@azure/core-rest-pipeline";
-import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline";
+import {
+  bearerTokenAuthenticationPolicy,
+  bearerTokenAuthenticationPolicyName,
+} from "@azure/core-rest-pipeline";
 import type {
   KnowledgeBaseRetrievalRequest,
   KnowledgeBaseRetrievalResponse,
-} from "./generated/knowledgeBase/index.js";
-import { SearchClient as GeneratedClient } from "./generated/knowledgeBase/searchClient.js";
+} from "./models/azure/search/documents/knowledgeBase/models.js";
+import type { KnowledgeBaseRetrievalClientOptionalParams } from "./knowledgeBaseRetrieval/knowledgeBaseRetrievalClient.js";
+import { KnowledgeBaseRetrievalClient as GeneratedClient } from "./knowledgeBaseRetrieval/knowledgeBaseRetrievalClient.js";
 import type { RetrieveKnowledgeOptions } from "./knowledgeBaseModels.js";
 import { logger } from "./logger.js";
 import { createOdataMetadataPolicy } from "./odataMetadataPolicy.js";
@@ -21,11 +23,12 @@ import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy.js";
 import { KnownSearchAudience } from "./searchAudience.js";
 import * as utils from "./serviceUtils.js";
 import { createSpan } from "./tracing.js";
+import type { ClientOptions } from "@azure-rest/core-client";

 /**
* Client options used to configure Cognitive Search API requests. */ -export interface KnowledgeRetrievalClientOptions extends ExtendedCommonClientOptions { +export interface KnowledgeRetrievalClientOptions extends ClientOptions { /** * The service version to use when communicating with the service. */ @@ -100,8 +103,9 @@ export class KnowledgeRetrievalClient { this.endpoint = endpoint; this.knowledgeBaseName = knowledgeBaseName; - const internalClientPipelineOptions: InternalClientPipelineOptions = { + const internalClientPipelineOptions: KnowledgeBaseRetrievalClientOptionalParams = { ...options, + apiVersion: options.serviceVersion ?? utils.defaultServiceVersion, ...{ loggingOptions: { logger: logger.info, @@ -119,15 +123,13 @@ export class KnowledgeRetrievalClient { this.serviceVersion = options.serviceVersion ?? utils.defaultServiceVersion; - this.client = new GeneratedClient( - this.endpoint, - this.knowledgeBaseName, - this.serviceVersion, - internalClientPipelineOptions, - ); + this.client = new GeneratedClient(endpoint, credential, internalClientPipelineOptions); this.pipeline = this.client.pipeline; + // TODO: consider leaving the policy in-place instead of removing and re-adding + this.pipeline.removePolicy({ name: bearerTokenAuthenticationPolicyName }); + if (isTokenCredential(credential)) { const scope: string = options.audience ? `${options.audience}/.default` @@ -153,7 +155,7 @@ export class KnowledgeRetrievalClient { ); try { - return await this.client.knowledgeRetrieval.retrieve(retrievalRequest, updatedOptions); + return await this.client.retrieve(this.knowledgeBaseName, retrievalRequest, updatedOptions); } catch (e: any) { span.setStatus({ status: "error", diff --git a/sdk/search/search-documents/src/logger.ts b/sdk/search/search-documents/src/logger.ts index f3a939cdcde7..d8dc5f9c8c13 100644 --- a/sdk/search/search-documents/src/logger.ts +++ b/sdk/search/search-documents/src/logger.ts @@ -2,8 +2,4 @@ // Licensed under the MIT License. import { createClientLogger } from "@azure/logger"; - -/** - * The `@azure/logger` configuration for this package. - */ -export const logger = createClientLogger("search"); +export const logger = createClientLogger("search-documents"); diff --git a/sdk/search/search-documents/src/models/azure/search/documents/index.ts b/sdk/search/search-documents/src/models/azure/search/documents/index.ts new file mode 100644 index 000000000000..7efdbd9aacf6 --- /dev/null +++ b/sdk/search/search-documents/src/models/azure/search/documents/index.ts @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
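With the rewiring above, the convenience client authenticates through the generated client's own pipeline and forwards the knowledge base name on every call. A minimal usage sketch; the endpoint and knowledge base name are hypothetical, and `retrieveKnowledge` is assumed to remain the public method wrapping the generated `retrieve` call shown in the hunk:

```typescript
import { DefaultAzureCredential } from "@azure/identity";
import { KnowledgeRetrievalClient } from "@azure/search-documents";

// Hypothetical endpoint and knowledge base name; the constructor order
// matches the fields assigned in the diff above.
const client = new KnowledgeRetrievalClient(
  "https://<service>.search.windows.net",
  "<knowledge-base>",
  new DefaultAzureCredential(),
  { serviceVersion: "2025-11-01-preview" },
);

// client.retrieveKnowledge(retrievalRequest) is then assumed to resolve to a
// KnowledgeBaseRetrievalResponse; see the retrieval sketch earlier for a
// plausible request body.
```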
+ +export { + ErrorResponse, + ErrorDetail, + ErrorAdditionalInfo, + SearchDocumentsResult, + FacetResult, + QueryAnswerResult, + DebugInfo, + QueryRewritesDebugInfo, + QueryRewritesValuesDebugInfo, + SearchRequest, + KnownQueryType, + QueryType, + KnownScoringStatistics, + ScoringStatistics, + KnownQueryDebugMode, + QueryDebugMode, + KnownSearchMode, + SearchMode, + KnownQueryLanguage, + QueryLanguage, + KnownQuerySpellerType, + QuerySpellerType, + KnownSemanticErrorMode, + SemanticErrorMode, + KnownQueryAnswerType, + QueryAnswerType, + KnownQueryCaptionType, + QueryCaptionType, + KnownQueryRewritesType, + QueryRewritesType, + VectorQuery, + VectorQueryUnion, + VectorThreshold, + VectorThresholdUnion, + KnownVectorThresholdKind, + VectorThresholdKind, + VectorSimilarityThreshold, + SearchScoreThreshold, + KnownVectorQueryKind, + VectorQueryKind, + VectorizedQuery, + VectorizableTextQuery, + VectorizableImageUrlQuery, + VectorizableImageBinaryQuery, + KnownVectorFilterMode, + VectorFilterMode, + HybridSearch, + KnownHybridCountAndFacetMode, + HybridCountAndFacetMode, + SearchResult, + QueryCaptionResult, + DocumentDebugInfo, + SemanticDebugInfo, + QueryResultDocumentSemanticField, + KnownSemanticFieldState, + SemanticFieldState, + QueryResultDocumentRerankerInput, + VectorsDebugInfo, + QueryResultDocumentSubscores, + TextResult, + SingleVectorFieldResult, + QueryResultDocumentInnerHit, + KnownSemanticErrorReason, + SemanticErrorReason, + KnownSemanticSearchResultsType, + SemanticSearchResultsType, + KnownSemanticQueryRewritesResultType, + SemanticQueryRewritesResultType, + LookupDocument, + SuggestDocumentsResult, + SuggestResult, + IndexDocumentsBatch, + IndexAction, + KnownIndexActionType, + IndexActionType, + IndexDocumentsResult, + IndexingResult, + AutocompleteResult, + AutocompleteItem, + KnownAutocompleteMode, + AutocompleteMode, +} from "./models.js"; diff --git a/sdk/search/search-documents/src/models/azure/search/documents/indexes/index.ts b/sdk/search/search-documents/src/models/azure/search/documents/indexes/index.ts new file mode 100644 index 000000000000..ce12b9ceeb72 --- /dev/null +++ b/sdk/search/search-documents/src/models/azure/search/documents/indexes/index.ts @@ -0,0 +1,364 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +export { + SynonymMap, + SearchResourceEncryptionKey, + AzureActiveDirectoryApplicationCredentials, + SearchIndexerDataIdentity, + SearchIndexerDataIdentityUnion, + SearchIndexerDataNoneIdentity, + SearchIndexerDataUserAssignedIdentity, + ListSynonymMapsResult, + SearchIndex, + SearchField, + KnownSearchFieldDataType, + SearchFieldDataType, + KnownPermissionFilter, + PermissionFilter, + KnownLexicalAnalyzerName, + LexicalAnalyzerName, + KnownLexicalNormalizerName, + LexicalNormalizerName, + KnownVectorEncodingFormat, + VectorEncodingFormat, + ScoringProfile, + TextWeights, + ScoringFunction, + ScoringFunctionUnion, + KnownScoringFunctionInterpolation, + ScoringFunctionInterpolation, + DistanceScoringFunction, + DistanceScoringParameters, + FreshnessScoringFunction, + FreshnessScoringParameters, + MagnitudeScoringFunction, + MagnitudeScoringParameters, + TagScoringFunction, + TagScoringParameters, + KnownScoringFunctionAggregation, + ScoringFunctionAggregation, + CorsOptions, + SearchSuggester, + LexicalAnalyzer, + LexicalAnalyzerUnion, + CustomAnalyzer, + KnownLexicalTokenizerName, + LexicalTokenizerName, + KnownTokenFilterName, + TokenFilterName, + KnownCharFilterName, + CharFilterName, + PatternAnalyzer, + KnownRegexFlags, + RegexFlags, + LuceneStandardAnalyzer, + StopAnalyzer, + LexicalTokenizer, + LexicalTokenizerUnion, + ClassicTokenizer, + EdgeNGramTokenizer, + KnownTokenCharacterKind, + TokenCharacterKind, + KeywordTokenizer, + MicrosoftLanguageTokenizer, + KnownMicrosoftTokenizerLanguage, + MicrosoftTokenizerLanguage, + MicrosoftLanguageStemmingTokenizer, + KnownMicrosoftStemmingTokenizerLanguage, + MicrosoftStemmingTokenizerLanguage, + NGramTokenizer, + PathHierarchyTokenizer, + PatternTokenizer, + LuceneStandardTokenizer, + UaxUrlEmailTokenizer, + TokenFilter, + TokenFilterUnion, + AsciiFoldingTokenFilter, + CjkBigramTokenFilter, + KnownCjkBigramTokenFilterScripts, + CjkBigramTokenFilterScripts, + CommonGramTokenFilter, + DictionaryDecompounderTokenFilter, + EdgeNGramTokenFilter, + KnownEdgeNGramTokenFilterSide, + EdgeNGramTokenFilterSide, + ElisionTokenFilter, + KeepTokenFilter, + KeywordMarkerTokenFilter, + LengthTokenFilter, + LimitTokenFilter, + NGramTokenFilter, + PatternCaptureTokenFilter, + PatternReplaceTokenFilter, + PhoneticTokenFilter, + KnownPhoneticEncoder, + PhoneticEncoder, + ShingleTokenFilter, + SnowballTokenFilter, + KnownSnowballTokenFilterLanguage, + SnowballTokenFilterLanguage, + StemmerTokenFilter, + KnownStemmerTokenFilterLanguage, + StemmerTokenFilterLanguage, + StemmerOverrideTokenFilter, + StopwordsTokenFilter, + KnownStopwordsList, + StopwordsList, + SynonymTokenFilter, + TruncateTokenFilter, + UniqueTokenFilter, + WordDelimiterTokenFilter, + CharFilter, + CharFilterUnion, + MappingCharFilter, + PatternReplaceCharFilter, + LexicalNormalizer, + LexicalNormalizerUnion, + CustomNormalizer, + SimilarityAlgorithm, + SimilarityAlgorithmUnion, + ClassicSimilarity, + BM25Similarity, + SemanticSearch, + SemanticConfiguration, + SemanticPrioritizedFields, + SemanticField, + KnownRankingOrder, + RankingOrder, + VectorSearch, + VectorSearchProfile, + VectorSearchAlgorithmConfiguration, + VectorSearchAlgorithmConfigurationUnion, + KnownVectorSearchAlgorithmKind, + VectorSearchAlgorithmKind, + HnswAlgorithmConfiguration, + HnswParameters, + KnownVectorSearchAlgorithmMetric, + VectorSearchAlgorithmMetric, + ExhaustiveKnnAlgorithmConfiguration, + ExhaustiveKnnParameters, + VectorSearchVectorizer, + VectorSearchVectorizerUnion, + KnownVectorSearchVectorizerKind, + 
VectorSearchVectorizerKind, + AzureOpenAIVectorizer, + AzureOpenAIVectorizerParameters, + KnownAzureOpenAIModelName, + AzureOpenAIModelName, + WebApiVectorizer, + WebApiVectorizerParameters, + AIServicesVisionVectorizer, + AIServicesVisionParameters, + AzureMachineLearningVectorizer, + AzureMachineLearningParameters, + KnownAIFoundryModelCatalogName, + AIFoundryModelCatalogName, + VectorSearchCompression, + VectorSearchCompressionUnion, + RescoringOptions, + KnownVectorSearchCompressionRescoreStorageMethod, + VectorSearchCompressionRescoreStorageMethod, + KnownVectorSearchCompressionKind, + VectorSearchCompressionKind, + ScalarQuantizationCompression, + ScalarQuantizationParameters, + KnownVectorSearchCompressionTarget, + VectorSearchCompressionTarget, + BinaryQuantizationCompression, + KnownSearchIndexPermissionFilterOption, + SearchIndexPermissionFilterOption, + GetIndexStatisticsResult, + AnalyzeTextOptions, + AnalyzeResult, + AnalyzedTokenInfo, + SearchAlias, + KnowledgeBase, + KnowledgeSourceReference, + KnowledgeBaseModel, + KnowledgeBaseModelUnion, + KnownKnowledgeBaseModelKind, + KnowledgeBaseModelKind, + KnowledgeBaseAzureOpenAIModel, + AzureOpenAiParameters, + KnowledgeSource, + KnowledgeSourceUnion, + KnownKnowledgeSourceKind, + KnowledgeSourceKind, + SearchIndexKnowledgeSource, + SearchIndexKnowledgeSourceParameters, + AzureBlobKnowledgeSource, + AzureBlobKnowledgeSourceParameters, + IndexingSchedule, + CreatedResources, + KnownBlobIndexerDataToExtract, + BlobIndexerDataToExtract, + KnownBlobIndexerImageAction, + BlobIndexerImageAction, + KnownBlobIndexerParsingMode, + BlobIndexerParsingMode, + KnownMarkdownHeaderDepth, + MarkdownHeaderDepth, + KnownMarkdownParsingSubmode, + MarkdownParsingSubmode, + KnownBlobIndexerPDFTextRotationAlgorithm, + BlobIndexerPDFTextRotationAlgorithm, + SearchServiceStatistics, + ServiceCounters, + ResourceCounter, + ServiceLimits, + IndexStatisticsSummary, + SearchIndexerDataSourceConnection, + KnownSearchIndexerDataSourceType, + SearchIndexerDataSourceType, + DataSourceCredentials, + SearchIndexerDataContainer, + KnownIndexerPermissionOption, + IndexerPermissionOption, + DataChangeDetectionPolicy, + DataChangeDetectionPolicyUnion, + HighWaterMarkChangeDetectionPolicy, + SqlIntegratedChangeTrackingPolicy, + DataDeletionDetectionPolicy, + DataDeletionDetectionPolicyUnion, + SoftDeleteColumnDeletionDetectionPolicy, + NativeBlobSoftDeleteDeletionDetectionPolicy, + ListDataSourcesResult, + DocumentKeysOrIds, + SearchIndexer, + IndexingParameters, + IndexingParametersConfiguration, + KnownIndexerExecutionEnvironment, + IndexerExecutionEnvironment, + FieldMapping, + FieldMappingFunction, + SearchIndexerCache, + ListIndexersResult, + SearchIndexerStatus, + KnownIndexerStatus, + IndexerStatus, + IndexerExecutionResult, + KnownIndexerExecutionStatus, + IndexerExecutionStatus, + KnownIndexerExecutionStatusDetail, + IndexerExecutionStatusDetail, + KnownIndexingMode, + IndexingMode, + IndexerCurrentState, + SearchIndexerError, + SearchIndexerWarning, + SearchIndexerLimits, + SearchIndexerSkillset, + SearchIndexerSkill, + SearchIndexerSkillUnion, + InputFieldMappingEntry, + OutputFieldMappingEntry, + ConditionalSkill, + KeyPhraseExtractionSkill, + KnownKeyPhraseExtractionSkillLanguage, + KeyPhraseExtractionSkillLanguage, + OcrSkill, + KnownOcrSkillLanguage, + OcrSkillLanguage, + KnownOcrLineEnding, + OcrLineEnding, + ImageAnalysisSkill, + KnownImageAnalysisSkillLanguage, + ImageAnalysisSkillLanguage, + KnownVisualFeature, + VisualFeature, + KnownImageDetail, + 
ImageDetail, + LanguageDetectionSkill, + ShaperSkill, + MergeSkill, + EntityRecognitionSkill, + KnownEntityCategory, + EntityCategory, + KnownEntityRecognitionSkillLanguage, + EntityRecognitionSkillLanguage, + SentimentSkill, + KnownSentimentSkillLanguage, + SentimentSkillLanguage, + SentimentSkillV3, + EntityLinkingSkill, + EntityRecognitionSkillV3, + PIIDetectionSkill, + KnownPIIDetectionSkillMaskingMode, + PIIDetectionSkillMaskingMode, + SplitSkill, + KnownSplitSkillLanguage, + SplitSkillLanguage, + KnownTextSplitMode, + TextSplitMode, + KnownSplitSkillUnit, + SplitSkillUnit, + AzureOpenAITokenizerParameters, + KnownSplitSkillEncoderModelName, + SplitSkillEncoderModelName, + CustomEntityLookupSkill, + KnownCustomEntityLookupSkillLanguage, + CustomEntityLookupSkillLanguage, + CustomEntity, + CustomEntityAlias, + TextTranslationSkill, + KnownTextTranslationSkillLanguage, + TextTranslationSkillLanguage, + DocumentExtractionSkill, + DocumentIntelligenceLayoutSkill, + KnownDocumentIntelligenceLayoutSkillOutputFormat, + DocumentIntelligenceLayoutSkillOutputFormat, + KnownDocumentIntelligenceLayoutSkillOutputMode, + DocumentIntelligenceLayoutSkillOutputMode, + KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + KnownDocumentIntelligenceLayoutSkillExtractionOptions, + DocumentIntelligenceLayoutSkillExtractionOptions, + DocumentIntelligenceLayoutSkillChunkingProperties, + KnownDocumentIntelligenceLayoutSkillChunkingUnit, + DocumentIntelligenceLayoutSkillChunkingUnit, + WebApiSkill, + AzureMachineLearningSkill, + AzureOpenAIEmbeddingSkill, + VisionVectorizeSkill, + ContentUnderstandingSkill, + KnownContentUnderstandingSkillExtractionOptions, + ContentUnderstandingSkillExtractionOptions, + ContentUnderstandingSkillChunkingProperties, + KnownContentUnderstandingSkillChunkingUnit, + ContentUnderstandingSkillChunkingUnit, + ChatCompletionSkill, + WebApiHttpHeaders, + CommonModelParameters, + KnownChatCompletionExtraParametersBehavior, + ChatCompletionExtraParametersBehavior, + ChatCompletionResponseFormat, + KnownChatCompletionResponseFormatType, + ChatCompletionResponseFormatType, + ChatCompletionSchemaProperties, + ChatCompletionSchema, + CognitiveServicesAccount, + CognitiveServicesAccountUnion, + DefaultCognitiveServicesAccount, + CognitiveServicesAccountKey, + AIServicesAccountKey, + AIServicesAccountIdentity, + SearchIndexerKnowledgeStore, + SearchIndexerKnowledgeStoreProjection, + SearchIndexerKnowledgeStoreTableProjectionSelector, + SearchIndexerKnowledgeStoreObjectProjectionSelector, + SearchIndexerKnowledgeStoreFileProjectionSelector, + SearchIndexerKnowledgeStoreParameters, + SearchIndexerIndexProjection, + SearchIndexerIndexProjectionSelector, + SearchIndexerIndexProjectionsParameters, + KnownIndexProjectionMode, + IndexProjectionMode, + SearchIndexerKnowledgeStoreProjectionSelector, + SearchIndexerKnowledgeStoreBlobProjectionSelector, + ListSkillsetsResult, + SkillNames, + IndexerResyncBody, + KnownIndexerResyncOption, + IndexerResyncOption, +} from "./models.js"; diff --git a/sdk/search/search-documents/src/models/azure/search/documents/indexes/models.ts b/sdk/search/search-documents/src/models/azure/search/documents/indexes/models.ts new file mode 100644 index 000000000000..970069edaa0c --- /dev/null +++ b/sdk/search/search-documents/src/models/azure/search/documents/indexes/models.ts @@ -0,0 +1,11955 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
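One pattern worth noting in the barrel above: each extensible enum ships as a `Known*` constant object next to an open string type (for example `KnownSearchFieldDataType` / `SearchFieldDataType`). A sketch of what that buys consumers, assuming the conventional member spellings and that the companion type stays assignable from plain strings:

```typescript
import { KnownSearchFieldDataType, type SearchFieldDataType } from "./index.js";

// Known* members name the values the service defined at codegen time...
const known: SearchFieldDataType = KnownSearchFieldDataType.String; // "Edm.String"
// ...while the open companion type still accepts plain strings, so a newer
// service value does not break existing compilations.
const forwardCompatible: SearchFieldDataType = "Edm.SomeFutureType";
```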
+ +import { serializeRecord } from "../../../../../static-helpers/serialization/serialize-record.js"; +import { + IndexedSharePointKnowledgeSource, + IndexedOneLakeKnowledgeSource, + WebKnowledgeSource, + RemoteSharePointKnowledgeSource, + ServiceIndexersRuntime, + serviceIndexersRuntimeDeserializer, + IndexerRuntime, + indexerRuntimeDeserializer, + indexedOneLakeKnowledgeSourceDeserializer, + indexedOneLakeKnowledgeSourceSerializer, + indexedSharePointKnowledgeSourceDeserializer, + indexedSharePointKnowledgeSourceSerializer, + remoteSharePointKnowledgeSourceDeserializer, + remoteSharePointKnowledgeSourceSerializer, + webKnowledgeSourceDeserializer, + webKnowledgeSourceSerializer, +} from "../../../../models.js"; +import { + knowledgeRetrievalReasoningEffortUnionSerializer, + knowledgeRetrievalReasoningEffortUnionDeserializer, + KnowledgeRetrievalReasoningEffortUnion, + KnowledgeRetrievalOutputMode, +} from "../knowledgeBase/models.js"; + +/** + * This file contains only generated model types and their (de)serializers. + * Disable the following rules for internal models with '_' prefix and deserializers which require 'any' for raw JSON input. + */ +/* eslint-disable @typescript-eslint/naming-convention */ +/* eslint-disable @typescript-eslint/explicit-module-boundary-types */ +/** Represents a synonym map definition. */ +export interface SynonymMap { + /** The name of the synonym map. */ + name: string; + /** The format of the synonym map. Only the 'solr' format is currently supported. */ + format: "solr"; + /** A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. */ + synonyms: string; + /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ + encryptionKey?: SearchResourceEncryptionKey; + /** The ETag of the synonym map. */ + eTag?: string; +} + +export function synonymMapSerializer(item: SynonymMap): any { + return { + name: item["name"], + format: item["format"], + synonyms: item["synonyms"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + "@odata.etag": item["eTag"], + }; +} + +export function synonymMapDeserializer(item: any): SynonymMap { + return { + name: item["name"], + format: item["format"], + synonyms: item["synonyms"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + eTag: item["@odata.etag"], + }; +} + +/** A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest, such as indexes and synonym maps. */ +export interface SearchResourceEncryptionKey { + /** The name of your Azure Key Vault key to be used to encrypt your data at rest. */ + keyName: string; + /** The version of your Azure Key Vault key to be used to encrypt your data at rest. 
*/ + keyVersion?: string; + /** The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be `https://my-keyvault-name.vault.azure.net`. */ + vaultUri: string; + /** Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. */ + accessCredentials?: AzureActiveDirectoryApplicationCredentials; + /** An explicit managed identity to use for this encryption key. If not specified and the access credentials property is null, the system-assigned managed identity is used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. If "none" is specified, the value of this property is cleared. */ + identity?: SearchIndexerDataIdentityUnion; +} + +export function searchResourceEncryptionKeySerializer(item: SearchResourceEncryptionKey): any { + return { + keyVaultKeyName: item["keyName"], + keyVaultKeyVersion: item["keyVersion"], + keyVaultUri: item["vaultUri"], + accessCredentials: !item["accessCredentials"] + ? item["accessCredentials"] + : azureActiveDirectoryApplicationCredentialsSerializer(item["accessCredentials"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + }; +} + +export function searchResourceEncryptionKeyDeserializer(item: any): SearchResourceEncryptionKey { + return { + keyName: item["keyVaultKeyName"], + keyVersion: item["keyVaultKeyVersion"], + vaultUri: item["keyVaultUri"], + accessCredentials: !item["accessCredentials"] + ? item["accessCredentials"] + : azureActiveDirectoryApplicationCredentialsDeserializer(item["accessCredentials"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + }; +} + +/** Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. */ +export interface AzureActiveDirectoryApplicationCredentials { + /** An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. */ + applicationId: string; + /** The authentication key of the specified AAD application. */ + applicationSecret?: string; +} + +export function azureActiveDirectoryApplicationCredentialsSerializer( + item: AzureActiveDirectoryApplicationCredentials, +): any { + return { + applicationId: item["applicationId"], + applicationSecret: item["applicationSecret"], + }; +} + +export function azureActiveDirectoryApplicationCredentialsDeserializer( + item: any, +): AzureActiveDirectoryApplicationCredentials { + return { + applicationId: item["applicationId"], + applicationSecret: item["applicationSecret"], + }; +} + +/** Abstract base type for data identities. */ +export interface SearchIndexerDataIdentity { + /** A URI fragment specifying the type of identity. 
*/ + /** The discriminator possible values: #Microsoft.Azure.Search.DataNoneIdentity, #Microsoft.Azure.Search.DataUserAssignedIdentity */ + odatatype: string; +} + +export function searchIndexerDataIdentitySerializer(item: SearchIndexerDataIdentity): any { + return { "@odata.type": item["odatatype"] }; +} + +export function searchIndexerDataIdentityDeserializer(item: any): SearchIndexerDataIdentity { + return { + odatatype: item["@odata.type"], + }; +} + +/** Alias for SearchIndexerDataIdentityUnion */ +export type SearchIndexerDataIdentityUnion = + | SearchIndexerDataNoneIdentity + | SearchIndexerDataUserAssignedIdentity + | SearchIndexerDataIdentity; + +export function searchIndexerDataIdentityUnionSerializer( + item: SearchIndexerDataIdentityUnion, +): any { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.DataNoneIdentity": + return searchIndexerDataNoneIdentitySerializer(item as SearchIndexerDataNoneIdentity); + + case "#Microsoft.Azure.Search.DataUserAssignedIdentity": + return searchIndexerDataUserAssignedIdentitySerializer( + item as SearchIndexerDataUserAssignedIdentity, + ); + + default: + return searchIndexerDataIdentitySerializer(item); + } +} + +export function searchIndexerDataIdentityUnionDeserializer( + item: any, +): SearchIndexerDataIdentityUnion { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.DataNoneIdentity": + return searchIndexerDataNoneIdentityDeserializer(item as SearchIndexerDataNoneIdentity); + + case "#Microsoft.Azure.Search.DataUserAssignedIdentity": + return searchIndexerDataUserAssignedIdentityDeserializer( + item as SearchIndexerDataUserAssignedIdentity, + ); + + default: + return searchIndexerDataIdentityDeserializer(item); + } +} + +/** Clears the identity property of a datasource. */ +export interface SearchIndexerDataNoneIdentity extends SearchIndexerDataIdentity { + /** The discriminator for derived types. */ + odatatype: "#Microsoft.Azure.Search.DataNoneIdentity"; +} + +export function searchIndexerDataNoneIdentitySerializer(item: SearchIndexerDataNoneIdentity): any { + return { "@odata.type": item["odatatype"] }; +} + +export function searchIndexerDataNoneIdentityDeserializer( + item: any, +): SearchIndexerDataNoneIdentity { + return { + odatatype: item["@odata.type"], + }; +} + +/** Specifies the identity for a datasource to use. */ +export interface SearchIndexerDataUserAssignedIdentity extends SearchIndexerDataIdentity { + /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. */ + resourceId: string; + /** A URI fragment specifying the type of identity. */ + odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity"; +} + +export function searchIndexerDataUserAssignedIdentitySerializer( + item: SearchIndexerDataUserAssignedIdentity, +): any { + return { + "@odata.type": item["odatatype"], + userAssignedIdentity: item["resourceId"], + }; +} + +export function searchIndexerDataUserAssignedIdentityDeserializer( + item: any, +): SearchIndexerDataUserAssignedIdentity { + return { + odatatype: item["@odata.type"], + resourceId: item["userAssignedIdentity"], + }; +} + +/** Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. */ +export interface ListSynonymMapsResult { + /** The synonym maps in the Search service. 
+
+/** Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. */
+export interface ListSynonymMapsResult {
+  /** The synonym maps in the Search service. */
+  synonymMaps: SynonymMap[];
+}
+
+export function listSynonymMapsResultDeserializer(item: any): ListSynonymMapsResult {
+  return {
+    synonymMaps: synonymMapArrayDeserializer(item["value"]),
+  };
+}
+
+export function synonymMapArraySerializer(result: Array<SynonymMap>): any[] {
+  return result.map((item) => {
+    return synonymMapSerializer(item);
+  });
+}
+
+export function synonymMapArrayDeserializer(result: Array<any>): any[] {
+  return result.map((item) => {
+    return synonymMapDeserializer(item);
+  });
+}
+
+/** Represents a search index definition, which describes the fields and search behavior of an index. */
+export interface SearchIndex {
+  /** The name of the index. */
+  name: string;
+  /** The description of the index. */
+  description?: string;
+  /** The fields of the index. */
+  fields: SearchField[];
+  /** The scoring profiles for the index. */
+  scoringProfiles?: ScoringProfile[];
+  /** The name of the scoring profile to use if none is specified in the query. If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. */
+  defaultScoringProfile?: string;
+  /** Options to control Cross-Origin Resource Sharing (CORS) for the index. */
+  corsOptions?: CorsOptions;
+  /** The suggesters for the index. */
+  suggesters?: SearchSuggester[];
+  /** The analyzers for the index. */
+  analyzers?: LexicalAnalyzerUnion[];
+  /** The tokenizers for the index. */
+  tokenizers?: LexicalTokenizerUnion[];
+  /** The token filters for the index. */
+  tokenFilters?: TokenFilterUnion[];
+  /** The character filters for the index. */
+  charFilters?: CharFilterUnion[];
+  /** The normalizers for the index. */
+  normalizers?: LexicalNormalizerUnion[];
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+  /** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */
+  similarity?: SimilarityAlgorithmUnion;
+  /** Defines parameters for a search index that influence semantic capabilities. */
+  semanticSearch?: SemanticSearch;
+  /** Contains configuration options related to vector search. */
+  vectorSearch?: VectorSearch;
+  /** A value indicating whether permission filtering is enabled for the index. */
+  permissionFilterOption?: SearchIndexPermissionFilterOption;
+  /** A value indicating whether Purview is enabled for the index. */
+  purviewEnabled?: boolean;
+  /** The ETag of the index. */
+  eTag?: string;
+}
+
+export function searchIndexSerializer(item: SearchIndex): any {
+  return {
+    name: item["name"],
+    description: item["description"],
+    fields: searchFieldArraySerializer(item["fields"]),
+    scoringProfiles: !item["scoringProfiles"]
+      ?
item["scoringProfiles"] + : scoringProfileArraySerializer(item["scoringProfiles"]), + defaultScoringProfile: item["defaultScoringProfile"], + corsOptions: !item["corsOptions"] + ? item["corsOptions"] + : corsOptionsSerializer(item["corsOptions"]), + suggesters: !item["suggesters"] + ? item["suggesters"] + : searchSuggesterArraySerializer(item["suggesters"]), + analyzers: !item["analyzers"] + ? item["analyzers"] + : lexicalAnalyzerUnionArraySerializer(item["analyzers"]), + tokenizers: !item["tokenizers"] + ? item["tokenizers"] + : lexicalTokenizerUnionArraySerializer(item["tokenizers"]), + tokenFilters: !item["tokenFilters"] + ? item["tokenFilters"] + : tokenFilterUnionArraySerializer(item["tokenFilters"]), + charFilters: !item["charFilters"] + ? item["charFilters"] + : charFilterUnionArraySerializer(item["charFilters"]), + normalizers: !item["normalizers"] + ? item["normalizers"] + : lexicalNormalizerUnionArraySerializer(item["normalizers"]), + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + similarity: !item["similarity"] + ? item["similarity"] + : similarityAlgorithmUnionSerializer(item["similarity"]), + semantic: !item["semanticSearch"] + ? item["semanticSearch"] + : semanticSearchSerializer(item["semanticSearch"]), + vectorSearch: !item["vectorSearch"] + ? item["vectorSearch"] + : vectorSearchSerializer(item["vectorSearch"]), + permissionFilterOption: item["permissionFilterOption"], + purviewEnabled: item["purviewEnabled"], + "@odata.etag": item["eTag"], + }; +} + +export function searchIndexDeserializer(item: any): SearchIndex { + return { + name: item["name"], + description: item["description"], + fields: searchFieldArrayDeserializer(item["fields"]), + scoringProfiles: !item["scoringProfiles"] + ? item["scoringProfiles"] + : scoringProfileArrayDeserializer(item["scoringProfiles"]), + defaultScoringProfile: item["defaultScoringProfile"], + corsOptions: !item["corsOptions"] + ? item["corsOptions"] + : corsOptionsDeserializer(item["corsOptions"]), + suggesters: !item["suggesters"] + ? item["suggesters"] + : searchSuggesterArrayDeserializer(item["suggesters"]), + analyzers: !item["analyzers"] + ? item["analyzers"] + : lexicalAnalyzerUnionArrayDeserializer(item["analyzers"]), + tokenizers: !item["tokenizers"] + ? item["tokenizers"] + : lexicalTokenizerUnionArrayDeserializer(item["tokenizers"]), + tokenFilters: !item["tokenFilters"] + ? item["tokenFilters"] + : tokenFilterUnionArrayDeserializer(item["tokenFilters"]), + charFilters: !item["charFilters"] + ? item["charFilters"] + : charFilterUnionArrayDeserializer(item["charFilters"]), + normalizers: !item["normalizers"] + ? item["normalizers"] + : lexicalNormalizerUnionArrayDeserializer(item["normalizers"]), + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + similarity: !item["similarity"] + ? item["similarity"] + : similarityAlgorithmUnionDeserializer(item["similarity"]), + semanticSearch: !item["semantic"] + ? item["semantic"] + : semanticSearchDeserializer(item["semantic"]), + vectorSearch: !item["vectorSearch"] + ? 
item["vectorSearch"] + : vectorSearchDeserializer(item["vectorSearch"]), + permissionFilterOption: item["permissionFilterOption"], + purviewEnabled: item["purviewEnabled"], + eTag: item["@odata.etag"], + }; +} + +export function searchFieldArraySerializer(result: Array): any[] { + return result.map((item) => { + return searchFieldSerializer(item); + }); +} + +export function searchFieldArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return searchFieldDeserializer(item); + }); +} + +/** Represents a field in an index definition, which describes the name, data type, and search behavior of a field. */ +export interface SearchField { + /** The name of the field, which must be unique within the fields collection of the index or parent field. */ + name: string; + /** The data type of the field. */ + type: SearchFieldDataType; + /** A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields. */ + key?: boolean; + /** A value indicating whether the field can be returned in a search result. You can disable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible to the end user. This property must be true for key fields, and it must be null for complex fields. This property can be changed on existing fields. Enabling this property does not cause any increase in index storage requirements. Default is true for simple fields, false for vector fields, and null for complex fields. */ + retrievable?: boolean; + /** An immutable value indicating whether the field will be persisted separately on disk to be returned in a search result. You can disable this option if you don't plan to return the field contents in a search response to save on storage overhead. This can only be set during index creation and only for vector fields. This property cannot be changed for existing fields or set as false for new fields. If this property is set as false, the property 'retrievable' must also be set to false. This property must be true or unset for key fields, for new fields, and for non-vector fields, and it must be null for complex fields. Disabling this property will reduce index storage requirements. The default is true for vector fields. */ + stored?: boolean; + /** A value indicating whether the field is full-text searchable. This means it will undergo analysis such as word-breaking during indexing. If you set a searchable field to a value like "sunny day", internally it will be split into the individual tokens "sunny" and "day". This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) are searchable by default. This property must be false for simple fields of other non-string data types, and it must be null for complex fields. Note: searchable fields consume extra space in your index to accommodate additional tokenized versions of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false. */ + searchable?: boolean; + /** A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. 
Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields. */ + filterable?: boolean; + /** A value indicating whether to enable the field to be referenced in $orderby expressions. By default, the search engine sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field can be sortable only if it is single-valued (it has a single value in the scope of the parent document). Simple collection fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex collections are also multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent field, or an ancestor field, that's the complex collection. Complex fields cannot be sortable and the sortable property must be null for such fields. The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. */ + sortable?: boolean; + /** A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. */ + facetable?: boolean; + /** A value indicating whether the field should be used as a permission filter. */ + permissionFilter?: PermissionFilter; + /** A value indicating whether the field contains sensitivity label information. */ + sensitivityLabel?: boolean; + /** The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */ + analyzerName?: LexicalAnalyzerName; + /** The name of the analyzer used at search time for the field. This option can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. */ + searchAnalyzerName?: LexicalAnalyzerName; + /** The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */ + indexAnalyzerName?: LexicalAnalyzerName; + /** The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed for the field. Must be null for complex fields. 
*/ + normalizerName?: LexicalNormalizerName; + /** The dimensionality of the vector field. */ + vectorSearchDimensions?: number; + /** The name of the vector search profile that specifies the algorithm and vectorizer to use when searching the vector field. */ + vectorSearchProfileName?: string; + /** The encoding format to interpret the field contents. */ + vectorEncodingFormat?: VectorEncodingFormat; + /** A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. Must be null or an empty collection for complex fields. */ + synonymMapNames?: string[]; + /** A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. */ + fields?: SearchField[]; +} + +export function searchFieldSerializer(item: SearchField): any { + return { + name: item["name"], + type: item["type"], + key: item["key"], + retrievable: item["retrievable"], + stored: item["stored"], + searchable: item["searchable"], + filterable: item["filterable"], + sortable: item["sortable"], + facetable: item["facetable"], + permissionFilter: item["permissionFilter"], + sensitivityLabel: item["sensitivityLabel"], + analyzer: item["analyzerName"], + searchAnalyzer: item["searchAnalyzerName"], + indexAnalyzer: item["indexAnalyzerName"], + normalizer: item["normalizerName"], + dimensions: item["vectorSearchDimensions"], + vectorSearchProfile: item["vectorSearchProfileName"], + vectorEncoding: item["vectorEncodingFormat"], + synonymMaps: !item["synonymMapNames"] + ? item["synonymMapNames"] + : item["synonymMapNames"].map((p: any) => { + return p; + }), + fields: !item["fields"] ? item["fields"] : searchFieldArraySerializer(item["fields"]), + }; +} + +export function searchFieldDeserializer(item: any): SearchField { + return { + name: item["name"], + type: item["type"], + key: item["key"], + retrievable: item["retrievable"], + stored: item["stored"], + searchable: item["searchable"], + filterable: item["filterable"], + sortable: item["sortable"], + facetable: item["facetable"], + permissionFilter: item["permissionFilter"], + sensitivityLabel: item["sensitivityLabel"], + analyzerName: item["analyzer"], + searchAnalyzerName: item["searchAnalyzer"], + indexAnalyzerName: item["indexAnalyzer"], + normalizerName: item["normalizer"], + vectorSearchDimensions: item["dimensions"], + vectorSearchProfileName: item["vectorSearchProfile"], + vectorEncodingFormat: item["vectorEncoding"], + synonymMapNames: !item["synonymMaps"] + ? item["synonymMaps"] + : item["synonymMaps"].map((p: any) => { + return p; + }), + fields: !item["fields"] ? item["fields"] : searchFieldArrayDeserializer(item["fields"]), + }; +} + +/** Defines the data type of a field in a search index. */ +export enum KnownSearchFieldDataType { + /** Indicates that a field contains a string. */ + String = "Edm.String", + /** Indicates that a field contains a 32-bit signed integer. */ + Int32 = "Edm.Int32", + /** Indicates that a field contains a 64-bit signed integer. */ + Int64 = "Edm.Int64", + /** Indicates that a field contains an IEEE double-precision floating point number. */ + Double = "Edm.Double", + /** Indicates that a field contains a Boolean value (true or false). 
*/
+  Boolean = "Edm.Boolean",
+  /** Indicates that a field contains a date/time value, including timezone information. */
+  DateTimeOffset = "Edm.DateTimeOffset",
+  /** Indicates that a field contains a geo-location in terms of longitude and latitude. */
+  GeographyPoint = "Edm.GeographyPoint",
+  /** Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. */
+  Complex = "Edm.ComplexType",
+  /** Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). */
+  Single = "Edm.Single",
+  /** Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). */
+  Half = "Edm.Half",
+  /** Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). */
+  Int16 = "Edm.Int16",
+  /** Indicates that a field contains an 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). */
+  SByte = "Edm.SByte",
+  /** Indicates that a field contains an 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte). */
+  Byte = "Edm.Byte",
+}
+
+/**
+ * Defines the data type of a field in a search index. \
+ * {@link KnownSearchFieldDataType} can be used interchangeably with SearchFieldDataType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **Edm.String**: Indicates that a field contains a string. \
+ * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer. \
+ * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer. \
+ * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number. \
+ * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false). \
+ * **Edm.DateTimeOffset**: Indicates that a field contains a date\/time value, including timezone information. \
+ * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and latitude. \
+ * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. \
+ * **Edm.Single**: Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). \
+ * **Edm.Half**: Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). \
+ * **Edm.Int16**: Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). \
+ * **Edm.SByte**: Indicates that a field contains an 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). \
+ * **Edm.Byte**: Indicates that a field contains an 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte).
+ */
+export type SearchFieldDataType = string;
+
+/** A value indicating whether the field should be used as a permission filter. */
+export enum KnownPermissionFilter {
+  /** Field represents user IDs that should be used to filter document access on queries. */
+  UserIds = "userIds",
+  /** Field represents group IDs that should be used to filter document access on queries. */
+  GroupIds = "groupIds",
+  /** Field represents an RBAC scope that should be used to filter document access on queries.
*/ + RbacScope = "rbacScope", +} + +/** + * A value indicating whether the field should be used as a permission filter. \ + * {@link KnownPermissionFilter} can be used interchangeably with PermissionFilter, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **userIds**: Field represents user IDs that should be used to filter document access on queries. \ + * **groupIds**: Field represents group IDs that should be used to filter document access on queries. \ + * **rbacScope**: Field represents an RBAC scope that should be used to filter document access on queries. + */ +export type PermissionFilter = string; + +/** Defines the names of all text analyzers supported by the search engine. */ +export enum KnownLexicalAnalyzerName { + /** Microsoft analyzer for Arabic. */ + ArMicrosoft = "ar.microsoft", + /** Lucene analyzer for Arabic. */ + ArLucene = "ar.lucene", + /** Lucene analyzer for Armenian. */ + HyLucene = "hy.lucene", + /** Microsoft analyzer for Bangla. */ + BnMicrosoft = "bn.microsoft", + /** Lucene analyzer for Basque. */ + EuLucene = "eu.lucene", + /** Microsoft analyzer for Bulgarian. */ + BgMicrosoft = "bg.microsoft", + /** Lucene analyzer for Bulgarian. */ + BgLucene = "bg.lucene", + /** Microsoft analyzer for Catalan. */ + CaMicrosoft = "ca.microsoft", + /** Lucene analyzer for Catalan. */ + CaLucene = "ca.lucene", + /** Microsoft analyzer for Chinese (Simplified). */ + ZhHansMicrosoft = "zh-Hans.microsoft", + /** Lucene analyzer for Chinese (Simplified). */ + ZhHansLucene = "zh-Hans.lucene", + /** Microsoft analyzer for Chinese (Traditional). */ + ZhHantMicrosoft = "zh-Hant.microsoft", + /** Lucene analyzer for Chinese (Traditional). */ + ZhHantLucene = "zh-Hant.lucene", + /** Microsoft analyzer for Croatian. */ + HrMicrosoft = "hr.microsoft", + /** Microsoft analyzer for Czech. */ + CsMicrosoft = "cs.microsoft", + /** Lucene analyzer for Czech. */ + CsLucene = "cs.lucene", + /** Microsoft analyzer for Danish. */ + DaMicrosoft = "da.microsoft", + /** Lucene analyzer for Danish. */ + DaLucene = "da.lucene", + /** Microsoft analyzer for Dutch. */ + NlMicrosoft = "nl.microsoft", + /** Lucene analyzer for Dutch. */ + NlLucene = "nl.lucene", + /** Microsoft analyzer for English. */ + EnMicrosoft = "en.microsoft", + /** Lucene analyzer for English. */ + EnLucene = "en.lucene", + /** Microsoft analyzer for Estonian. */ + EtMicrosoft = "et.microsoft", + /** Microsoft analyzer for Finnish. */ + FiMicrosoft = "fi.microsoft", + /** Lucene analyzer for Finnish. */ + FiLucene = "fi.lucene", + /** Microsoft analyzer for French. */ + FrMicrosoft = "fr.microsoft", + /** Lucene analyzer for French. */ + FrLucene = "fr.lucene", + /** Lucene analyzer for Galician. */ + GlLucene = "gl.lucene", + /** Microsoft analyzer for German. */ + DeMicrosoft = "de.microsoft", + /** Lucene analyzer for German. */ + DeLucene = "de.lucene", + /** Microsoft analyzer for Greek. */ + ElMicrosoft = "el.microsoft", + /** Lucene analyzer for Greek. */ + ElLucene = "el.lucene", + /** Microsoft analyzer for Gujarati. */ + GuMicrosoft = "gu.microsoft", + /** Microsoft analyzer for Hebrew. */ + HeMicrosoft = "he.microsoft", + /** Microsoft analyzer for Hindi. */ + HiMicrosoft = "hi.microsoft", + /** Lucene analyzer for Hindi. */ + HiLucene = "hi.lucene", + /** Microsoft analyzer for Hungarian. */ + HuMicrosoft = "hu.microsoft", + /** Lucene analyzer for Hungarian. */ + HuLucene = "hu.lucene", + /** Microsoft analyzer for Icelandic. 
*/ + IsMicrosoft = "is.microsoft", + /** Microsoft analyzer for Indonesian (Bahasa). */ + IdMicrosoft = "id.microsoft", + /** Lucene analyzer for Indonesian. */ + IdLucene = "id.lucene", + /** Lucene analyzer for Irish. */ + GaLucene = "ga.lucene", + /** Microsoft analyzer for Italian. */ + ItMicrosoft = "it.microsoft", + /** Lucene analyzer for Italian. */ + ItLucene = "it.lucene", + /** Microsoft analyzer for Japanese. */ + JaMicrosoft = "ja.microsoft", + /** Lucene analyzer for Japanese. */ + JaLucene = "ja.lucene", + /** Microsoft analyzer for Kannada. */ + KnMicrosoft = "kn.microsoft", + /** Microsoft analyzer for Korean. */ + KoMicrosoft = "ko.microsoft", + /** Lucene analyzer for Korean. */ + KoLucene = "ko.lucene", + /** Microsoft analyzer for Latvian. */ + LvMicrosoft = "lv.microsoft", + /** Lucene analyzer for Latvian. */ + LvLucene = "lv.lucene", + /** Microsoft analyzer for Lithuanian. */ + LtMicrosoft = "lt.microsoft", + /** Microsoft analyzer for Malayalam. */ + MlMicrosoft = "ml.microsoft", + /** Microsoft analyzer for Malay (Latin). */ + MsMicrosoft = "ms.microsoft", + /** Microsoft analyzer for Marathi. */ + MrMicrosoft = "mr.microsoft", + /** Microsoft analyzer for Norwegian (Bokmål). */ + NbMicrosoft = "nb.microsoft", + /** Lucene analyzer for Norwegian. */ + NoLucene = "no.lucene", + /** Lucene analyzer for Persian. */ + FaLucene = "fa.lucene", + /** Microsoft analyzer for Polish. */ + PlMicrosoft = "pl.microsoft", + /** Lucene analyzer for Polish. */ + PlLucene = "pl.lucene", + /** Microsoft analyzer for Portuguese (Brazil). */ + PtBrMicrosoft = "pt-BR.microsoft", + /** Lucene analyzer for Portuguese (Brazil). */ + PtBrLucene = "pt-BR.lucene", + /** Microsoft analyzer for Portuguese (Portugal). */ + PtPtMicrosoft = "pt-PT.microsoft", + /** Lucene analyzer for Portuguese (Portugal). */ + PtPtLucene = "pt-PT.lucene", + /** Microsoft analyzer for Punjabi. */ + PaMicrosoft = "pa.microsoft", + /** Microsoft analyzer for Romanian. */ + RoMicrosoft = "ro.microsoft", + /** Lucene analyzer for Romanian. */ + RoLucene = "ro.lucene", + /** Microsoft analyzer for Russian. */ + RuMicrosoft = "ru.microsoft", + /** Lucene analyzer for Russian. */ + RuLucene = "ru.lucene", + /** Microsoft analyzer for Serbian (Cyrillic). */ + SrCyrillicMicrosoft = "sr-cyrillic.microsoft", + /** Microsoft analyzer for Serbian (Latin). */ + SrLatinMicrosoft = "sr-latin.microsoft", + /** Microsoft analyzer for Slovak. */ + SkMicrosoft = "sk.microsoft", + /** Microsoft analyzer for Slovenian. */ + SlMicrosoft = "sl.microsoft", + /** Microsoft analyzer for Spanish. */ + EsMicrosoft = "es.microsoft", + /** Lucene analyzer for Spanish. */ + EsLucene = "es.lucene", + /** Microsoft analyzer for Swedish. */ + SvMicrosoft = "sv.microsoft", + /** Lucene analyzer for Swedish. */ + SvLucene = "sv.lucene", + /** Microsoft analyzer for Tamil. */ + TaMicrosoft = "ta.microsoft", + /** Microsoft analyzer for Telugu. */ + TeMicrosoft = "te.microsoft", + /** Microsoft analyzer for Thai. */ + ThMicrosoft = "th.microsoft", + /** Lucene analyzer for Thai. */ + ThLucene = "th.lucene", + /** Microsoft analyzer for Turkish. */ + TrMicrosoft = "tr.microsoft", + /** Lucene analyzer for Turkish. */ + TrLucene = "tr.lucene", + /** Microsoft analyzer for Ukrainian. */ + UkMicrosoft = "uk.microsoft", + /** Microsoft analyzer for Urdu. */ + UrMicrosoft = "ur.microsoft", + /** Microsoft analyzer for Vietnamese. */ + ViMicrosoft = "vi.microsoft", + /** Standard Lucene analyzer. 
*/ + StandardLucene = "standard.lucene", + /** Standard ASCII Folding Lucene analyzer. See https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#Analyzers */ + StandardAsciiFoldingLucene = "standardasciifolding.lucene", + /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordAnalyzer.html */ + Keyword = "keyword", + /** Flexibly separates text into terms via a regular expression pattern. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.html */ + Pattern = "pattern", + /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/SimpleAnalyzer.html */ + Simple = "simple", + /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html */ + Stop = "stop", + /** An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html */ + Whitespace = "whitespace", +} + +/** + * Defines the names of all text analyzers supported by the search engine. \ + * {@link KnownLexicalAnalyzerName} can be used interchangeably with LexicalAnalyzerName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **ar.microsoft**: Microsoft analyzer for Arabic. \ + * **ar.lucene**: Lucene analyzer for Arabic. \ + * **hy.lucene**: Lucene analyzer for Armenian. \ + * **bn.microsoft**: Microsoft analyzer for Bangla. \ + * **eu.lucene**: Lucene analyzer for Basque. \ + * **bg.microsoft**: Microsoft analyzer for Bulgarian. \ + * **bg.lucene**: Lucene analyzer for Bulgarian. \ + * **ca.microsoft**: Microsoft analyzer for Catalan. \ + * **ca.lucene**: Lucene analyzer for Catalan. \ + * **zh-Hans.microsoft**: Microsoft analyzer for Chinese (Simplified). \ + * **zh-Hans.lucene**: Lucene analyzer for Chinese (Simplified). \ + * **zh-Hant.microsoft**: Microsoft analyzer for Chinese (Traditional). \ + * **zh-Hant.lucene**: Lucene analyzer for Chinese (Traditional). \ + * **hr.microsoft**: Microsoft analyzer for Croatian. \ + * **cs.microsoft**: Microsoft analyzer for Czech. \ + * **cs.lucene**: Lucene analyzer for Czech. \ + * **da.microsoft**: Microsoft analyzer for Danish. \ + * **da.lucene**: Lucene analyzer for Danish. \ + * **nl.microsoft**: Microsoft analyzer for Dutch. \ + * **nl.lucene**: Lucene analyzer for Dutch. \ + * **en.microsoft**: Microsoft analyzer for English. \ + * **en.lucene**: Lucene analyzer for English. \ + * **et.microsoft**: Microsoft analyzer for Estonian. \ + * **fi.microsoft**: Microsoft analyzer for Finnish. \ + * **fi.lucene**: Lucene analyzer for Finnish. \ + * **fr.microsoft**: Microsoft analyzer for French. \ + * **fr.lucene**: Lucene analyzer for French. \ + * **gl.lucene**: Lucene analyzer for Galician. \ + * **de.microsoft**: Microsoft analyzer for German. \ + * **de.lucene**: Lucene analyzer for German. \ + * **el.microsoft**: Microsoft analyzer for Greek. \ + * **el.lucene**: Lucene analyzer for Greek. \ + * **gu.microsoft**: Microsoft analyzer for Gujarati. \ + * **he.microsoft**: Microsoft analyzer for Hebrew. 
\ + * **hi.microsoft**: Microsoft analyzer for Hindi. \ + * **hi.lucene**: Lucene analyzer for Hindi. \ + * **hu.microsoft**: Microsoft analyzer for Hungarian. \ + * **hu.lucene**: Lucene analyzer for Hungarian. \ + * **is.microsoft**: Microsoft analyzer for Icelandic. \ + * **id.microsoft**: Microsoft analyzer for Indonesian (Bahasa). \ + * **id.lucene**: Lucene analyzer for Indonesian. \ + * **ga.lucene**: Lucene analyzer for Irish. \ + * **it.microsoft**: Microsoft analyzer for Italian. \ + * **it.lucene**: Lucene analyzer for Italian. \ + * **ja.microsoft**: Microsoft analyzer for Japanese. \ + * **ja.lucene**: Lucene analyzer for Japanese. \ + * **kn.microsoft**: Microsoft analyzer for Kannada. \ + * **ko.microsoft**: Microsoft analyzer for Korean. \ + * **ko.lucene**: Lucene analyzer for Korean. \ + * **lv.microsoft**: Microsoft analyzer for Latvian. \ + * **lv.lucene**: Lucene analyzer for Latvian. \ + * **lt.microsoft**: Microsoft analyzer for Lithuanian. \ + * **ml.microsoft**: Microsoft analyzer for Malayalam. \ + * **ms.microsoft**: Microsoft analyzer for Malay (Latin). \ + * **mr.microsoft**: Microsoft analyzer for Marathi. \ + * **nb.microsoft**: Microsoft analyzer for Norwegian (Bokmål). \ + * **no.lucene**: Lucene analyzer for Norwegian. \ + * **fa.lucene**: Lucene analyzer for Persian. \ + * **pl.microsoft**: Microsoft analyzer for Polish. \ + * **pl.lucene**: Lucene analyzer for Polish. \ + * **pt-BR.microsoft**: Microsoft analyzer for Portuguese (Brazil). \ + * **pt-BR.lucene**: Lucene analyzer for Portuguese (Brazil). \ + * **pt-PT.microsoft**: Microsoft analyzer for Portuguese (Portugal). \ + * **pt-PT.lucene**: Lucene analyzer for Portuguese (Portugal). \ + * **pa.microsoft**: Microsoft analyzer for Punjabi. \ + * **ro.microsoft**: Microsoft analyzer for Romanian. \ + * **ro.lucene**: Lucene analyzer for Romanian. \ + * **ru.microsoft**: Microsoft analyzer for Russian. \ + * **ru.lucene**: Lucene analyzer for Russian. \ + * **sr-cyrillic.microsoft**: Microsoft analyzer for Serbian (Cyrillic). \ + * **sr-latin.microsoft**: Microsoft analyzer for Serbian (Latin). \ + * **sk.microsoft**: Microsoft analyzer for Slovak. \ + * **sl.microsoft**: Microsoft analyzer for Slovenian. \ + * **es.microsoft**: Microsoft analyzer for Spanish. \ + * **es.lucene**: Lucene analyzer for Spanish. \ + * **sv.microsoft**: Microsoft analyzer for Swedish. \ + * **sv.lucene**: Lucene analyzer for Swedish. \ + * **ta.microsoft**: Microsoft analyzer for Tamil. \ + * **te.microsoft**: Microsoft analyzer for Telugu. \ + * **th.microsoft**: Microsoft analyzer for Thai. \ + * **th.lucene**: Lucene analyzer for Thai. \ + * **tr.microsoft**: Microsoft analyzer for Turkish. \ + * **tr.lucene**: Lucene analyzer for Turkish. \ + * **uk.microsoft**: Microsoft analyzer for Ukrainian. \ + * **ur.microsoft**: Microsoft analyzer for Urdu. \ + * **vi.microsoft**: Microsoft analyzer for Vietnamese. \ + * **standard.lucene**: Standard Lucene analyzer. \ + * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers \ + * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html \ + * **pattern**: Flexibly separates text into terms via a regular expression pattern. 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html \
+ * **simple**: Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html \
+ * **stop**: Divides text at non-letters; applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html \
+ * **whitespace**: An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html
+ */
+export type LexicalAnalyzerName = string;
+
+/** Defines the names of all text normalizers supported by the search engine. */
+export enum KnownLexicalNormalizerName {
+  /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */
+  AsciiFolding = "asciifolding",
+  /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */
+  Elision = "elision",
+  /** Normalizes token text to lowercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */
+  Lowercase = "lowercase",
+  /** Standard normalizer, which consists of lowercase and asciifolding. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */
+  Standard = "standard",
+  /** Normalizes token text to uppercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */
+  Uppercase = "uppercase",
+}
+
+/**
+ * Defines the names of all text normalizers supported by the search engine. \
+ * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \
+ * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \
+ * **lowercase**: Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \
+ * **standard**: Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \
+ * **uppercase**: Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html
+ */
+export type LexicalNormalizerName = string;
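+
+/*
+ * Illustrative sketch (not generated code): the Known* enums above are
+ * conveniences over the plain string aliases, so either spelling can be
+ * assigned when defining a field. The field name here is hypothetical; a
+ * normalizer requires filterable, sortable, or facetable to be enabled.
+ *
+ * const nameField: SearchField = {
+ *   name: "hotelName",
+ *   type: "Edm.String",
+ *   searchable: true,
+ *   filterable: true,
+ *   analyzerName: KnownLexicalAnalyzerName.EnLucene, // same wire value as "en.lucene"
+ *   normalizerName: KnownLexicalNormalizerName.Lowercase, // same wire value as "lowercase"
+ * };
+ */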
+
+/** The encoding format for interpreting vector field contents. */
+export enum KnownVectorEncodingFormat {
+  /** Encoding format representing bits packed into a wider data type. */
+  PackedBit = "packedBit",
+}
+
+/**
+ * The encoding format for interpreting vector field contents. \
+ * {@link KnownVectorEncodingFormat} can be used interchangeably with VectorEncodingFormat,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **packedBit**: Encoding format representing bits packed into a wider data type.
+ */
+export type VectorEncodingFormat = string;
+
+export function scoringProfileArraySerializer(result: Array<ScoringProfile>): any[] {
+  return result.map((item) => {
+    return scoringProfileSerializer(item);
+  });
+}
+
+export function scoringProfileArrayDeserializer(result: Array<any>): any[] {
+  return result.map((item) => {
+    return scoringProfileDeserializer(item);
+  });
+}
+
+/** Defines parameters for a search index that influence scoring in search queries. */
+export interface ScoringProfile {
+  /** The name of the scoring profile. */
+  name: string;
+  /** Parameters that boost scoring based on text matches in certain index fields. */
+  textWeights?: TextWeights;
+  /** The collection of functions that influence the scoring of documents. */
+  functions?: ScoringFunctionUnion[];
+  /** A value indicating how the results of individual scoring functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. */
+  functionAggregation?: ScoringFunctionAggregation;
+}
+
+export function scoringProfileSerializer(item: ScoringProfile): any {
+  return {
+    name: item["name"],
+    text: !item["textWeights"] ? item["textWeights"] : textWeightsSerializer(item["textWeights"]),
+    functions: !item["functions"]
+      ? item["functions"]
+      : scoringFunctionUnionArraySerializer(item["functions"]),
+    functionAggregation: item["functionAggregation"],
+  };
+}
+
+export function scoringProfileDeserializer(item: any): ScoringProfile {
+  return {
+    name: item["name"],
+    textWeights: !item["text"] ? item["text"] : textWeightsDeserializer(item["text"]),
+    functions: !item["functions"]
+      ? item["functions"]
+      : scoringFunctionUnionArrayDeserializer(item["functions"]),
+    functionAggregation: item["functionAggregation"],
+  };
+}
+
+/** Defines weights on index fields for which matches should boost scoring in search queries. */
+export interface TextWeights {
+  /** The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. */
+  weights: Record<string, number>;
+}
+
+export function textWeightsSerializer(item: TextWeights): any {
+  return { weights: item["weights"] };
+}
+
+export function textWeightsDeserializer(item: any): TextWeights {
+  return {
+    weights: item["weights"],
+  };
+}
+
+export function scoringFunctionUnionArraySerializer(result: Array<ScoringFunctionUnion>): any[] {
+  return result.map((item) => {
+    return scoringFunctionUnionSerializer(item);
+  });
+}
+
+export function scoringFunctionUnionArrayDeserializer(result: Array<any>): any[] {
+  return result.map((item) => {
+    return scoringFunctionUnionDeserializer(item);
+  });
+}
+
+/** Base type for functions that can modify document scores during ranking. */
+export interface ScoringFunction {
+  /** The name of the field used as input to the scoring function.
*/ + fieldName: string; + /** A multiplier for the raw score. Must be a positive number not equal to 1.0. */ + boost: number; + /** A value indicating how boosting will be interpolated across document scores; defaults to "Linear". */ + interpolation?: ScoringFunctionInterpolation; + /** Type of ScoringFunction. */ + /** The discriminator possible values: distance, freshness, magnitude, tag */ + type: string; +} + +export function scoringFunctionSerializer(item: ScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + }; +} + +export function scoringFunctionDeserializer(item: any): ScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + }; +} + +/** Alias for ScoringFunctionUnion */ +export type ScoringFunctionUnion = + | DistanceScoringFunction + | FreshnessScoringFunction + | MagnitudeScoringFunction + | TagScoringFunction + | ScoringFunction; + +export function scoringFunctionUnionSerializer(item: ScoringFunctionUnion): any { + switch (item.type) { + case "distance": + return distanceScoringFunctionSerializer(item as DistanceScoringFunction); + + case "freshness": + return freshnessScoringFunctionSerializer(item as FreshnessScoringFunction); + + case "magnitude": + return magnitudeScoringFunctionSerializer(item as MagnitudeScoringFunction); + + case "tag": + return tagScoringFunctionSerializer(item as TagScoringFunction); + + default: + return scoringFunctionSerializer(item); + } +} + +export function scoringFunctionUnionDeserializer(item: any): ScoringFunctionUnion { + switch (item.type) { + case "distance": + return distanceScoringFunctionDeserializer(item as DistanceScoringFunction); + + case "freshness": + return freshnessScoringFunctionDeserializer(item as FreshnessScoringFunction); + + case "magnitude": + return magnitudeScoringFunctionDeserializer(item as MagnitudeScoringFunction); + + case "tag": + return tagScoringFunctionDeserializer(item as TagScoringFunction); + + default: + return scoringFunctionDeserializer(item); + } +} + +/** Defines the function used to interpolate score boosting across a range of documents. */ +export enum KnownScoringFunctionInterpolation { + /** Boosts scores by a linearly decreasing amount. This is the default interpolation for scoring functions. */ + Linear = "linear", + /** Boosts scores by a constant factor. */ + Constant = "constant", + /** Boosts scores by an amount that decreases quadratically. Boosts decrease slowly for higher scores, and more quickly as the scores decrease. This interpolation option is not allowed in tag scoring functions. */ + Quadratic = "quadratic", + /** Boosts scores by an amount that decreases logarithmically. Boosts decrease quickly for higher scores, and more slowly as the scores decrease. This interpolation option is not allowed in tag scoring functions. */ + Logarithmic = "logarithmic", +} + +/** + * Defines the function used to interpolate score boosting across a range of documents. \ + * {@link KnownScoringFunctionInterpolation} can be used interchangeably with ScoringFunctionInterpolation, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **linear**: Boosts scores by a linearly decreasing amount. This is the default interpolation for scoring functions. \ + * **constant**: Boosts scores by a constant factor. 
\ + * **quadratic**: Boosts scores by an amount that decreases quadratically. Boosts decrease slowly for higher scores, and more quickly as the scores decrease. This interpolation option is not allowed in tag scoring functions. \ + * **logarithmic**: Boosts scores by an amount that decreases logarithmically. Boosts decrease quickly for higher scores, and more slowly as the scores decrease. This interpolation option is not allowed in tag scoring functions. + */ +export type ScoringFunctionInterpolation = string; + +/** Defines a function that boosts scores based on distance from a geographic location. */ +export interface DistanceScoringFunction extends ScoringFunction { + /** Parameter values for the distance scoring function. */ + parameters: DistanceScoringParameters; + /** Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. */ + type: "distance"; +} + +export function distanceScoringFunctionSerializer(item: DistanceScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + distance: distanceScoringParametersSerializer(item["parameters"]), + }; +} + +export function distanceScoringFunctionDeserializer(item: any): DistanceScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + parameters: distanceScoringParametersDeserializer(item["distance"]), + }; +} + +/** Provides parameter values to a distance scoring function. */ +export interface DistanceScoringParameters { + /** The name of the parameter passed in search queries to specify the reference location. */ + referencePointParameter: string; + /** The distance in kilometers from the reference location where the boosting range ends. */ + boostingDistance: number; +} + +export function distanceScoringParametersSerializer(item: DistanceScoringParameters): any { + return { + referencePointParameter: item["referencePointParameter"], + boostingDistance: item["boostingDistance"], + }; +} + +export function distanceScoringParametersDeserializer(item: any): DistanceScoringParameters { + return { + referencePointParameter: item["referencePointParameter"], + boostingDistance: item["boostingDistance"], + }; +} + +/** Defines a function that boosts scores based on the value of a date-time field. */ +export interface FreshnessScoringFunction extends ScoringFunction { + /** Parameter values for the freshness scoring function. */ + parameters: FreshnessScoringParameters; + /** Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. */ + type: "freshness"; +} + +export function freshnessScoringFunctionSerializer(item: FreshnessScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + freshness: freshnessScoringParametersSerializer(item["parameters"]), + }; +} + +export function freshnessScoringFunctionDeserializer(item: any): FreshnessScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + parameters: freshnessScoringParametersDeserializer(item["freshness"]), + }; +} + +/** Provides parameter values to a freshness scoring function. 
*/ +export interface FreshnessScoringParameters { + /** The expiration period after which boosting will stop for a particular document. */ + boostingDuration: string; +} + +export function freshnessScoringParametersSerializer(item: FreshnessScoringParameters): any { + return { boostingDuration: item["boostingDuration"] }; +} + +export function freshnessScoringParametersDeserializer(item: any): FreshnessScoringParameters { + return { + boostingDuration: item["boostingDuration"], + }; +} + +/** Defines a function that boosts scores based on the magnitude of a numeric field. */ +export interface MagnitudeScoringFunction extends ScoringFunction { + /** Parameter values for the magnitude scoring function. */ + parameters: MagnitudeScoringParameters; + /** Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. */ + type: "magnitude"; +} + +export function magnitudeScoringFunctionSerializer(item: MagnitudeScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + magnitude: magnitudeScoringParametersSerializer(item["parameters"]), + }; +} + +export function magnitudeScoringFunctionDeserializer(item: any): MagnitudeScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + parameters: magnitudeScoringParametersDeserializer(item["magnitude"]), + }; +} + +/** Provides parameter values to a magnitude scoring function. */ +export interface MagnitudeScoringParameters { + /** The field value at which boosting starts. */ + boostingRangeStart: number; + /** The field value at which boosting ends. */ + boostingRangeEnd: number; + /** A value indicating whether to apply a constant boost for field values beyond the range end value; default is false. */ + shouldBoostBeyondRangeByConstant?: boolean; +} + +export function magnitudeScoringParametersSerializer(item: MagnitudeScoringParameters): any { + return { + boostingRangeStart: item["boostingRangeStart"], + boostingRangeEnd: item["boostingRangeEnd"], + constantBoostBeyondRange: item["shouldBoostBeyondRangeByConstant"], + }; +} + +export function magnitudeScoringParametersDeserializer(item: any): MagnitudeScoringParameters { + return { + boostingRangeStart: item["boostingRangeStart"], + boostingRangeEnd: item["boostingRangeEnd"], + shouldBoostBeyondRangeByConstant: item["constantBoostBeyondRange"], + }; +} + +/** Defines a function that boosts scores of documents with string values matching a given list of tags. */ +export interface TagScoringFunction extends ScoringFunction { + /** Parameter values for the tag scoring function. */ + parameters: TagScoringParameters; + /** Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. 
*/ + type: "tag"; +} + +export function tagScoringFunctionSerializer(item: TagScoringFunction): any { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + tag: tagScoringParametersSerializer(item["parameters"]), + }; +} + +export function tagScoringFunctionDeserializer(item: any): TagScoringFunction { + return { + fieldName: item["fieldName"], + boost: item["boost"], + interpolation: item["interpolation"], + type: item["type"], + parameters: tagScoringParametersDeserializer(item["tag"]), + }; +} + +/** Provides parameter values to a tag scoring function. */ +export interface TagScoringParameters { + /** The name of the parameter passed in search queries to specify the list of tags to compare against the target field. */ + tagsParameter: string; +} + +export function tagScoringParametersSerializer(item: TagScoringParameters): any { + return { tagsParameter: item["tagsParameter"] }; +} + +export function tagScoringParametersDeserializer(item: any): TagScoringParameters { + return { + tagsParameter: item["tagsParameter"], + }; +} + +/** Defines the aggregation function used to combine the results of all the scoring functions in a scoring profile. */ +export enum KnownScoringFunctionAggregation { + /** Boost scores by the sum of all scoring function results. */ + Sum = "sum", + /** Boost scores by the average of all scoring function results. */ + Average = "average", + /** Boost scores by the minimum of all scoring function results. */ + Minimum = "minimum", + /** Boost scores by the maximum of all scoring function results. */ + Maximum = "maximum", + /** Boost scores using the first applicable scoring function in the scoring profile. */ + FirstMatching = "firstMatching", +} + +/** + * Defines the aggregation function used to combine the results of all the scoring functions in a scoring profile. \ + * {@link KnownScoringFunctionAggregation} can be used interchangeably with ScoringFunctionAggregation, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **sum**: Boost scores by the sum of all scoring function results. \ + * **average**: Boost scores by the average of all scoring function results. \ + * **minimum**: Boost scores by the minimum of all scoring function results. \ + * **maximum**: Boost scores by the maximum of all scoring function results. \ + * **firstMatching**: Boost scores using the first applicable scoring function in the scoring profile. + */ +export type ScoringFunctionAggregation = string; + +/** Defines options to control Cross-Origin Resource Sharing (CORS) for an index. */ +export interface CorsOptions { + /** The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). */ + allowedOrigins: string[]; + /** The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. 
*/
+  maxAgeInSeconds?: number;
+}
+
+export function corsOptionsSerializer(item: CorsOptions): any {
+  return {
+    allowedOrigins: item["allowedOrigins"].map((p: any) => {
+      return p;
+    }),
+    maxAgeInSeconds: item["maxAgeInSeconds"],
+  };
+}
+
+export function corsOptionsDeserializer(item: any): CorsOptions {
+  return {
+    allowedOrigins: item["allowedOrigins"].map((p: any) => {
+      return p;
+    }),
+    maxAgeInSeconds: item["maxAgeInSeconds"],
+  };
+}
+
+export function searchSuggesterArraySerializer(result: Array<SearchSuggester>): any[] {
+  return result.map((item) => {
+    return searchSuggesterSerializer(item);
+  });
+}
+
+export function searchSuggesterArrayDeserializer(result: Array<any>): any[] {
+  return result.map((item) => {
+    return searchSuggesterDeserializer(item);
+  });
+}
+
+/** Defines how the Suggest API should apply to a group of fields in the index. */
+export interface SearchSuggester {
+  /** The name of the suggester. */
+  name: string;
+  /** A value indicating the capabilities of the suggester. */
+  searchMode: "analyzingInfixMatching";
+  /** The list of field names to which the suggester applies. Each field must be searchable. */
+  sourceFields: string[];
+}
+
+export function searchSuggesterSerializer(item: SearchSuggester): any {
+  return {
+    name: item["name"],
+    searchMode: item["searchMode"],
+    sourceFields: item["sourceFields"].map((p: any) => {
+      return p;
+    }),
+  };
+}
+
+export function searchSuggesterDeserializer(item: any): SearchSuggester {
+  return {
+    name: item["name"],
+    searchMode: item["searchMode"],
+    sourceFields: item["sourceFields"].map((p: any) => {
+      return p;
+    }),
+  };
+}
+
+export function lexicalAnalyzerUnionArraySerializer(result: Array<LexicalAnalyzerUnion>): any[] {
+  return result.map((item) => {
+    return lexicalAnalyzerUnionSerializer(item);
+  });
+}
+
+export function lexicalAnalyzerUnionArrayDeserializer(result: Array<any>): any[] {
+  return result.map((item) => {
+    return lexicalAnalyzerUnionDeserializer(item);
+  });
+}
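+
+/*
+ * Illustrative sketch (not generated code): assembling a minimal SearchIndex
+ * from the models above and serializing it to its wire shape. Index, field,
+ * suggester, and origin names are hypothetical; searchIndexSerializer applies
+ * the property renames defined earlier in this file (for example,
+ * eTag -> "@odata.etag"). Suggester source fields must be searchable.
+ *
+ * const index: SearchIndex = {
+ *   name: "hotels",
+ *   fields: [
+ *     { name: "id", type: "Edm.String", key: true },
+ *     { name: "description", type: "Edm.String", searchable: true },
+ *   ],
+ *   suggesters: [
+ *     { name: "sg", searchMode: "analyzingInfixMatching", sourceFields: ["description"] },
+ *   ],
+ *   corsOptions: { allowedOrigins: ["https://contoso.example"], maxAgeInSeconds: 300 },
+ * };
+ * const payload = searchIndexSerializer(index);
+ */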
+
+export function lexicalAnalyzerUnionArraySerializer(result: Array<LexicalAnalyzerUnion>): any[] {
+  return result.map((item) => {
+    return lexicalAnalyzerUnionSerializer(item);
+  });
+}
+
+export function lexicalAnalyzerUnionArrayDeserializer(result: Array<LexicalAnalyzerUnion>): any[] {
+  return result.map((item) => {
+    return lexicalAnalyzerUnionDeserializer(item);
+  });
+}
+
+/** Base type for analyzers. */
+export interface LexicalAnalyzer {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.CustomAnalyzer, #Microsoft.Azure.Search.PatternAnalyzer, #Microsoft.Azure.Search.StandardAnalyzer, #Microsoft.Azure.Search.StopAnalyzer */
+  odatatype: string;
+  /** The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+
+export function lexicalAnalyzerSerializer(item: LexicalAnalyzer): any {
+  return { "@odata.type": item["odatatype"], name: item["name"] };
+}
+
+export function lexicalAnalyzerDeserializer(item: any): LexicalAnalyzer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+  };
+}
+
+/** Alias for LexicalAnalyzerUnion */
+export type LexicalAnalyzerUnion =
+  | CustomAnalyzer
+  | PatternAnalyzer
+  | LuceneStandardAnalyzer
+  | StopAnalyzer
+  | LexicalAnalyzer;
+
+export function lexicalAnalyzerUnionSerializer(item: LexicalAnalyzerUnion): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.CustomAnalyzer":
+      return customAnalyzerSerializer(item as CustomAnalyzer);
+
+    case "#Microsoft.Azure.Search.PatternAnalyzer":
+      return patternAnalyzerSerializer(item as PatternAnalyzer);
+
+    case "#Microsoft.Azure.Search.StandardAnalyzer":
+      return luceneStandardAnalyzerSerializer(item as LuceneStandardAnalyzer);
+
+    case "#Microsoft.Azure.Search.StopAnalyzer":
+      return stopAnalyzerSerializer(item as StopAnalyzer);
+
+    default:
+      return lexicalAnalyzerSerializer(item);
+  }
+}
+
+export function lexicalAnalyzerUnionDeserializer(item: any): LexicalAnalyzerUnion {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.CustomAnalyzer":
+      return customAnalyzerDeserializer(item as CustomAnalyzer);
+
+    case "#Microsoft.Azure.Search.PatternAnalyzer":
+      return patternAnalyzerDeserializer(item as PatternAnalyzer);
+
+    case "#Microsoft.Azure.Search.StandardAnalyzer":
+      return luceneStandardAnalyzerDeserializer(item as LuceneStandardAnalyzer);
+
+    case "#Microsoft.Azure.Search.StopAnalyzer":
+      return stopAnalyzerDeserializer(item as StopAnalyzer);
+
+    default:
+      return lexicalAnalyzerDeserializer(item);
+  }
+}
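+
+// Illustrative sketch (editor's example, not generated code): the union
+// serializer dispatches on the "odatatype" discriminator, so the StopAnalyzer
+// below is routed to stopAnalyzerSerializer and emitted with the wire-format
+// "@odata.type" key; an unknown discriminator falls through to the base
+// lexicalAnalyzerSerializer.
+const analyzerWireExample = lexicalAnalyzerUnionSerializer({
+  odatatype: "#Microsoft.Azure.Search.StopAnalyzer",
+  name: "stop-example",
+  stopwords: ["the", "and"],
+});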
item["charFilters"] + : item["charFilters"].map((p: any) => { + return p; + }), + }; +} + +export function customAnalyzerDeserializer(item: any): CustomAnalyzer { + return { + odatatype: item["@odata.type"], + name: item["name"], + tokenizer: item["tokenizer"], + tokenFilters: !item["tokenFilters"] + ? item["tokenFilters"] + : item["tokenFilters"].map((p: any) => { + return p; + }), + charFilters: !item["charFilters"] + ? item["charFilters"] + : item["charFilters"].map((p: any) => { + return p; + }), + }; +} + +/** Defines the names of all tokenizers supported by the search engine. */ +export enum KnownLexicalTokenizerName { + /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html */ + Classic = "classic", + /** Tokenizes the input from an edge into n-grams of the given size(s). See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html */ + EdgeNGram = "edgeNGram", + /** Emits the entire input as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html */ + Keyword = "keyword_v2", + /** Divides text at non-letters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html */ + Letter = "letter", + /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html */ + Lowercase = "lowercase", + /** Divides text using language-specific rules. */ + MicrosoftLanguageTokenizer = "microsoft_language_tokenizer", + /** Divides text using language-specific rules and reduces words to their base forms. */ + MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer", + /** Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html */ + NGram = "nGram", + /** Tokenizer for path-like hierarchies. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html */ + PathHierarchy = "path_hierarchy_v2", + /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html */ + Pattern = "pattern", + /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html */ + Standard = "standard_v2", + /** Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html */ + UaxUrlEmail = "uax_url_email", + /** Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html */ + Whitespace = "whitespace", +} + +/** + * Defines the names of all tokenizers supported by the search engine. \ + * {@link KnownLexicalTokenizerName} can be used interchangeably with LexicalTokenizerName, + * this enum contains the known values that the service supports. 
+
+/** Defines the names of all tokenizers supported by the search engine. */
+export enum KnownLexicalTokenizerName {
+  /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html */
+  Classic = "classic",
+  /** Tokenizes the input from an edge into n-grams of the given size(s). See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html */
+  EdgeNGram = "edgeNGram",
+  /** Emits the entire input as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html */
+  Keyword = "keyword_v2",
+  /** Divides text at non-letters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html */
+  Letter = "letter",
+  /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html */
+  Lowercase = "lowercase",
+  /** Divides text using language-specific rules. */
+  MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
+  /** Divides text using language-specific rules and reduces words to their base forms. */
+  MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
+  /** Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html */
+  NGram = "nGram",
+  /** Tokenizer for path-like hierarchies. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html */
+  PathHierarchy = "path_hierarchy_v2",
+  /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html */
+  Pattern = "pattern",
+  /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html */
+  Standard = "standard_v2",
+  /** Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html */
+  UaxUrlEmail = "uax_url_email",
+  /** Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html */
+  Whitespace = "whitespace",
+}
+
+/**
+ * Defines the names of all tokenizers supported by the search engine. \
+ * {@link KnownLexicalTokenizerName} can be used interchangeably with LexicalTokenizerName,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **classic**: Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html \
+ * **edgeNGram**: Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html \
+ * **keyword_v2**: Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html \
+ * **letter**: Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html \
+ * **lowercase**: Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html \
+ * **microsoft_language_tokenizer**: Divides text using language-specific rules. \
+ * **microsoft_language_stemming_tokenizer**: Divides text using language-specific rules and reduces words to their base forms. \
+ * **nGram**: Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html \
+ * **path_hierarchy_v2**: Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html \
+ * **pattern**: Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html \
+ * **standard_v2**: Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html \
+ * **uax_url_email**: Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html \
+ * **whitespace**: Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html
+ */
+export type LexicalTokenizerName = string;
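+
+// Illustrative sketch (editor's example, not generated code): because
+// LexicalTokenizerName is an extensible string type, the Known* enum member
+// and its raw string value are interchangeable.
+const tokenizerNameFromEnum: LexicalTokenizerName = KnownLexicalTokenizerName.EdgeNGram;
+const tokenizerNameFromString: LexicalTokenizerName = "edgeNGram";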
+
+/** Defines the names of all token filters supported by the search engine. */
+export enum KnownTokenFilterName {
+  /** A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html */
+  ArabicNormalization = "arabic_normalization",
+  /** Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html */
+  Apostrophe = "apostrophe",
+  /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */
+  AsciiFolding = "asciifolding",
+  /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html */
+  CjkBigram = "cjk_bigram",
+  /** Normalizes CJK width differences. Folds full-width ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html */
+  CjkWidth = "cjk_width",
+  /** Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html */
+  Classic = "classic",
+  /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html */
+  CommonGram = "common_grams",
+  /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html */
+  EdgeNGram = "edgeNGram_v2",
+  /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */
+  Elision = "elision",
+  /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html */
+  GermanNormalization = "german_normalization",
+  /** Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html */
+  HindiNormalization = "hindi_normalization",
+  /** Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html */
+  IndicNormalization = "indic_normalization",
+  /** Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html */
+  KeywordRepeat = "keyword_repeat",
+  /** A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html */
+  KStem = "kstem",
+  /** Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html */
+  Length = "length",
+  /** Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html */
+  Limit = "limit",
+  /** Normalizes token text to lower case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */
+  Lowercase = "lowercase",
+  /** Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html */
+  NGram = "nGram_v2",
+  /** Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html */
+  PersianNormalization = "persian_normalization",
+  /** Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html */
+  Phonetic = "phonetic",
+  /** Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer */
+  PorterStem = "porter_stem",
+  /** Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */
+  Reverse = "reverse",
+  /** Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html */
+  ScandinavianNormalization = "scandinavian_normalization",
+  /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html */
+  ScandinavianFoldingNormalization = "scandinavian_folding",
+  /** Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html */
+  Shingle = "shingle",
+  /** A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html */
+  Snowball = "snowball",
+  /** Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html */
+  SoraniNormalization = "sorani_normalization",
+  /** Language specific stemming filter. See https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters */
+  Stemmer = "stemmer",
+  /** Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html */
+  Stopwords = "stopwords",
+  /** Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html */
+  Trim = "trim",
+  /** Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html */
+  Truncate = "truncate",
+  /** Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html */
+  Unique = "unique",
+  /** Normalizes token text to upper case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */
+  Uppercase = "uppercase",
+  /** Splits words into subwords and performs optional transformations on subword groups. */
+  WordDelimiter = "word_delimiter",
+}
+
+/**
+ * Defines the names of all token filters supported by the search engine. \
+ * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html \
+ * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html \
+ * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \
+ * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html \
+ * **cjk_width**: Normalizes CJK width differences. Folds full-width ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html \
+ * **classic**: Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html \
+ * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html \
+ * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html \
+ * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \
+ * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html \
+ * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html \
+ * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html \
+ * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html \
+ * **kstem**: A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html \
+ * **length**: Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html \
+ * **limit**: Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html \
+ * **lowercase**: Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \
+ * **nGram_v2**: Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html \
+ * **persian_normalization**: Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html \
+ * **phonetic**: Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html \
+ * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer \
+ * **reverse**: Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \
+ * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html \
+ * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html \
+ * **shingle**: Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html \
+ * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html \
+ * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html \
+ * **stemmer**: Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters \
+ * **stopwords**: Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html \
+ * **trim**: Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html \
+ * **truncate**: Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html \
+ * **unique**: Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html \
+ * **uppercase**: Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html \
+ * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups.
+ */
+export type TokenFilterName = string;
+
+/** Defines the names of all character filters supported by the search engine. */
+export enum KnownCharFilterName {
+  /** A character filter that attempts to strip out HTML constructs. See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html */
+  HtmlStrip = "html_strip",
+}
+
+/**
+ * Defines the names of all character filters supported by the search engine. \
+ * {@link KnownCharFilterName} can be used interchangeably with CharFilterName,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **html_strip**: A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html
+ */
+export type CharFilterName = string;
+
+/** Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. */
+export interface PatternAnalyzer extends LexicalAnalyzer {
+  /** A value indicating whether terms should be lower-cased. Default is true. */
+  lowerCaseTerms?: boolean;
+  /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */
+  pattern?: string;
+  /** Regular expression flags. */
+  flags?: RegexFlags;
+  /** A list of stopwords. */
+  stopwords?: string[];
+  /** A URI fragment specifying the type of analyzer. */
+  odatatype: "#Microsoft.Azure.Search.PatternAnalyzer";
+}
+
+export function patternAnalyzerSerializer(item: PatternAnalyzer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    lowercase: item["lowerCaseTerms"],
+    pattern: item["pattern"],
+    flags: item["flags"],
+    stopwords: !item["stopwords"]
+      ? item["stopwords"]
+      : item["stopwords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function patternAnalyzerDeserializer(item: any): PatternAnalyzer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    lowerCaseTerms: item["lowercase"],
+    pattern: item["pattern"],
+    flags: item["flags"],
+    stopwords: !item["stopwords"]
+      ? item["stopwords"]
+      : item["stopwords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
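+
+// Illustrative sketch (editor's example, not generated code): note the wire
+// rename here: the model property "lowerCaseTerms" is sent as the REST
+// property "lowercase", and the deserializer maps it back.
+const patternAnalyzerWire = patternAnalyzerSerializer({
+  odatatype: "#Microsoft.Azure.Search.PatternAnalyzer",
+  name: "comma-separated",
+  lowerCaseTerms: true,
+  pattern: ",",
+});
+// patternAnalyzerWire.lowercase === true; there is no "lowerCaseTerms" key on the wire.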
+
+/** Defines flags that can be combined to control how regular expressions are used in the pattern analyzer and pattern tokenizer. */
+export enum KnownRegexFlags {
+  /** Enables canonical equivalence. */
+  CanonEq = "CANON_EQ",
+  /** Enables case-insensitive matching. */
+  CaseInsensitive = "CASE_INSENSITIVE",
+  /** Permits whitespace and comments in the pattern. */
+  Comments = "COMMENTS",
+  /** Enables dotall mode. */
+  DotAll = "DOTALL",
+  /** Enables literal parsing of the pattern. */
+  Literal = "LITERAL",
+  /** Enables multiline mode. */
+  Multiline = "MULTILINE",
+  /** Enables Unicode-aware case folding. */
+  UnicodeCase = "UNICODE_CASE",
+  /** Enables Unix lines mode. */
+  UnixLines = "UNIX_LINES",
+}
+
+/**
+ * Defines flags that can be combined to control how regular expressions are used in the pattern analyzer and pattern tokenizer. \
+ * {@link KnownRegexFlags} can be used interchangeably with RegexFlags,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **CANON_EQ**: Enables canonical equivalence. \
+ * **CASE_INSENSITIVE**: Enables case-insensitive matching. \
+ * **COMMENTS**: Permits whitespace and comments in the pattern. \
+ * **DOTALL**: Enables dotall mode. \
+ * **LITERAL**: Enables literal parsing of the pattern. \
+ * **MULTILINE**: Enables multiline mode. \
+ * **UNICODE_CASE**: Enables Unicode-aware case folding. \
+ * **UNIX_LINES**: Enables Unix lines mode.
+ */
+export type RegexFlags = string;
+
+/** Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. */
+export interface LuceneStandardAnalyzer extends LexicalAnalyzer {
+  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
+  maxTokenLength?: number;
+  /** A list of stopwords. */
+  stopwords?: string[];
+  /** A URI fragment specifying the type of analyzer. */
+  odatatype: "#Microsoft.Azure.Search.StandardAnalyzer";
+}
+
+export function luceneStandardAnalyzerSerializer(item: LuceneStandardAnalyzer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    stopwords: !item["stopwords"]
+      ? item["stopwords"]
+      : item["stopwords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function luceneStandardAnalyzerDeserializer(item: any): LuceneStandardAnalyzer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    stopwords: !item["stopwords"]
+      ? item["stopwords"]
+      : item["stopwords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+/** Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. */
+export interface StopAnalyzer extends LexicalAnalyzer {
+  /** A list of stopwords. */
+  stopwords?: string[];
+  /** A URI fragment specifying the type of analyzer. */
+  odatatype: "#Microsoft.Azure.Search.StopAnalyzer";
+}
+
+export function stopAnalyzerSerializer(item: StopAnalyzer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    stopwords: !item["stopwords"]
+      ? item["stopwords"]
+      : item["stopwords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function stopAnalyzerDeserializer(item: any): StopAnalyzer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    stopwords: !item["stopwords"]
+      ? item["stopwords"]
+      : item["stopwords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function lexicalTokenizerUnionArraySerializer(result: Array<LexicalTokenizerUnion>): any[] {
+  return result.map((item) => {
+    return lexicalTokenizerUnionSerializer(item);
+  });
+}
+
+export function lexicalTokenizerUnionArrayDeserializer(
+  result: Array<LexicalTokenizerUnion>,
+): any[] {
+  return result.map((item) => {
+    return lexicalTokenizerUnionDeserializer(item);
+  });
+}
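+
+// Illustrative sketch (editor's example, not generated code): the array
+// helpers simply map the per-item union (de)serializers, declared below,
+// over each element.
+const tokenizersWireExample = lexicalTokenizerUnionArraySerializer([
+  {
+    odatatype: "#Microsoft.Azure.Search.ClassicTokenizer",
+    name: "classic-280",
+    maxTokenLength: 280,
+  },
+]);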
+
+/** Base type for tokenizers. */
+export interface LexicalTokenizer {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.ClassicTokenizer, #Microsoft.Azure.Search.EdgeNGramTokenizer, #Microsoft.Azure.Search.KeywordTokenizerV2, #Microsoft.Azure.Search.MicrosoftLanguageTokenizer, #Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer, #Microsoft.Azure.Search.NGramTokenizer, #Microsoft.Azure.Search.PathHierarchyTokenizerV2, #Microsoft.Azure.Search.PatternTokenizer, #Microsoft.Azure.Search.StandardTokenizerV2, #Microsoft.Azure.Search.UaxUrlEmailTokenizer */
+  odatatype: string;
+  /** The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+
+export function lexicalTokenizerSerializer(item: LexicalTokenizer): any {
+  return { "@odata.type": item["odatatype"], name: item["name"] };
+}
+
+export function lexicalTokenizerDeserializer(item: any): LexicalTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+  };
+}
+
+/** Alias for LexicalTokenizerUnion */
+export type LexicalTokenizerUnion =
+  | ClassicTokenizer
+  | EdgeNGramTokenizer
+  | KeywordTokenizer
+  | MicrosoftLanguageTokenizer
+  | MicrosoftLanguageStemmingTokenizer
+  | NGramTokenizer
+  | PathHierarchyTokenizer
+  | PatternTokenizer
+  | LuceneStandardTokenizer
+  | UaxUrlEmailTokenizer
+  | LexicalTokenizer;
+
+export function lexicalTokenizerUnionSerializer(item: LexicalTokenizerUnion): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.ClassicTokenizer":
+      return classicTokenizerSerializer(item as ClassicTokenizer);
+
+    case "#Microsoft.Azure.Search.EdgeNGramTokenizer":
+      return edgeNGramTokenizerSerializer(item as EdgeNGramTokenizer);
+
+    case "#Microsoft.Azure.Search.KeywordTokenizerV2":
+      return keywordTokenizerSerializer(item as KeywordTokenizer);
+
+    case "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer":
+      return microsoftLanguageTokenizerSerializer(item as MicrosoftLanguageTokenizer);
+
+    case "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer":
+      return microsoftLanguageStemmingTokenizerSerializer(
+        item as MicrosoftLanguageStemmingTokenizer,
+      );
+
+    case "#Microsoft.Azure.Search.NGramTokenizer":
+      return nGramTokenizerSerializer(item as NGramTokenizer);
+
+    case "#Microsoft.Azure.Search.PathHierarchyTokenizerV2":
+      return pathHierarchyTokenizerSerializer(item as PathHierarchyTokenizer);
+
+    case "#Microsoft.Azure.Search.PatternTokenizer":
+      return patternTokenizerSerializer(item as PatternTokenizer);
+
+    case "#Microsoft.Azure.Search.StandardTokenizerV2":
+      return luceneStandardTokenizerSerializer(item as LuceneStandardTokenizer);
+
+    case "#Microsoft.Azure.Search.UaxUrlEmailTokenizer":
+      return uaxUrlEmailTokenizerSerializer(item as UaxUrlEmailTokenizer);
+
+    default:
+      return lexicalTokenizerSerializer(item);
+  }
+}
"#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer": + return microsoftLanguageStemmingTokenizerDeserializer( + item as MicrosoftLanguageStemmingTokenizer, + ); + + case "#Microsoft.Azure.Search.NGramTokenizer": + return nGramTokenizerDeserializer(item as NGramTokenizer); + + case "#Microsoft.Azure.Search.PathHierarchyTokenizerV2": + return pathHierarchyTokenizerDeserializer(item as PathHierarchyTokenizer); + + case "#Microsoft.Azure.Search.PatternTokenizer": + return patternTokenizerDeserializer(item as PatternTokenizer); + + case "#Microsoft.Azure.Search.StandardTokenizerV2": + return luceneStandardTokenizerDeserializer(item as LuceneStandardTokenizer); + + case "#Microsoft.Azure.Search.UaxUrlEmailTokenizer": + return uaxUrlEmailTokenizerDeserializer(item as UaxUrlEmailTokenizer); + + default: + return lexicalTokenizerDeserializer(item); + } +} + +/** Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. */ +export interface ClassicTokenizer extends LexicalTokenizer { + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; + /** A URI fragment specifying the type of tokenizer. */ + odatatype: "#Microsoft.Azure.Search.ClassicTokenizer"; +} + +export function classicTokenizerSerializer(item: ClassicTokenizer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + maxTokenLength: item["maxTokenLength"], + }; +} + +export function classicTokenizerDeserializer(item: any): ClassicTokenizer { + return { + odatatype: item["@odata.type"], + name: item["name"], + maxTokenLength: item["maxTokenLength"], + }; +} + +/** Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */ +export interface EdgeNGramTokenizer extends LexicalTokenizer { + /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */ + minGram?: number; + /** The maximum n-gram length. Default is 2. Maximum is 300. */ + maxGram?: number; + /** Character classes to keep in the tokens. */ + tokenChars?: TokenCharacterKind[]; + /** A URI fragment specifying the type of tokenizer. */ + odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer"; +} + +export function edgeNGramTokenizerSerializer(item: EdgeNGramTokenizer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + minGram: item["minGram"], + maxGram: item["maxGram"], + tokenChars: !item["tokenChars"] + ? item["tokenChars"] + : item["tokenChars"].map((p: any) => { + return p; + }), + }; +} + +export function edgeNGramTokenizerDeserializer(item: any): EdgeNGramTokenizer { + return { + odatatype: item["@odata.type"], + name: item["name"], + minGram: item["minGram"], + maxGram: item["maxGram"], + tokenChars: !item["tokenChars"] + ? item["tokenChars"] + : item["tokenChars"].map((p: any) => { + return p; + }), + }; +} + +/** Represents classes of characters on which a token filter can operate. */ +export enum KnownTokenCharacterKind { + /** Keeps letters in tokens. */ + Letter = "letter", + /** Keeps digits in tokens. */ + Digit = "digit", + /** Keeps whitespace in tokens. */ + Whitespace = "whitespace", + /** Keeps punctuation in tokens. */ + Punctuation = "punctuation", + /** Keeps symbols in tokens. */ + Symbol = "symbol", +} + +/** + * Represents classes of characters on which a token filter can operate. 
+
+/** Represents classes of characters on which a token filter can operate. */
+export enum KnownTokenCharacterKind {
+  /** Keeps letters in tokens. */
+  Letter = "letter",
+  /** Keeps digits in tokens. */
+  Digit = "digit",
+  /** Keeps whitespace in tokens. */
+  Whitespace = "whitespace",
+  /** Keeps punctuation in tokens. */
+  Punctuation = "punctuation",
+  /** Keeps symbols in tokens. */
+  Symbol = "symbol",
+}
+
+/**
+ * Represents classes of characters on which a token filter can operate. \
+ * {@link KnownTokenCharacterKind} can be used interchangeably with TokenCharacterKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **letter**: Keeps letters in tokens. \
+ * **digit**: Keeps digits in tokens. \
+ * **whitespace**: Keeps whitespace in tokens. \
+ * **punctuation**: Keeps punctuation in tokens. \
+ * **symbol**: Keeps symbols in tokens.
+ */
+export type TokenCharacterKind = string;
+
+/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */
+export interface KeywordTokenizer extends LexicalTokenizer {
+  /** The maximum token length. Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
+  maxTokenLength?: number;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.KeywordTokenizerV2";
+}
+
+export function keywordTokenizerSerializer(item: KeywordTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+export function keywordTokenizerDeserializer(item: any): KeywordTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+/** Divides text using language-specific rules. */
+export interface MicrosoftLanguageTokenizer extends LexicalTokenizer {
+  /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
+  maxTokenLength?: number;
+  /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */
+  isSearchTokenizer?: boolean;
+  /** The language to use. The default is English. */
+  language?: MicrosoftTokenizerLanguage;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer";
+}
+
+export function microsoftLanguageTokenizerSerializer(item: MicrosoftLanguageTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    isSearchTokenizer: item["isSearchTokenizer"],
+    language: item["language"],
+  };
+}
+
+export function microsoftLanguageTokenizerDeserializer(item: any): MicrosoftLanguageTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    isSearchTokenizer: item["isSearchTokenizer"],
+    language: item["language"],
+  };
+}
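+
+// Illustrative sketch (editor's example, not generated code): per the docs
+// above, isSearchTokenizer selects query-time behavior; leave it false (the
+// default) for the tokenizer applied at indexing time.
+const microsoftTokenizerExample: MicrosoftLanguageTokenizer = {
+  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer",
+  name: "ms-english",
+  language: "english",
+  isSearchTokenizer: false,
+};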
+
+/** Lists the languages supported by the Microsoft language tokenizer. */
+export enum KnownMicrosoftTokenizerLanguage {
+  /** Selects the Microsoft tokenizer for Bangla. */
+  Bangla = "bangla",
+  /** Selects the Microsoft tokenizer for Bulgarian. */
+  Bulgarian = "bulgarian",
+  /** Selects the Microsoft tokenizer for Catalan. */
+  Catalan = "catalan",
+  /** Selects the Microsoft tokenizer for Chinese (Simplified). */
+  ChineseSimplified = "chineseSimplified",
+  /** Selects the Microsoft tokenizer for Chinese (Traditional). */
+  ChineseTraditional = "chineseTraditional",
+  /** Selects the Microsoft tokenizer for Croatian. */
+  Croatian = "croatian",
+  /** Selects the Microsoft tokenizer for Czech. */
+  Czech = "czech",
+  /** Selects the Microsoft tokenizer for Danish. */
+  Danish = "danish",
+  /** Selects the Microsoft tokenizer for Dutch. */
+  Dutch = "dutch",
+  /** Selects the Microsoft tokenizer for English. */
+  English = "english",
+  /** Selects the Microsoft tokenizer for French. */
+  French = "french",
+  /** Selects the Microsoft tokenizer for German. */
+  German = "german",
+  /** Selects the Microsoft tokenizer for Greek. */
+  Greek = "greek",
+  /** Selects the Microsoft tokenizer for Gujarati. */
+  Gujarati = "gujarati",
+  /** Selects the Microsoft tokenizer for Hindi. */
+  Hindi = "hindi",
+  /** Selects the Microsoft tokenizer for Icelandic. */
+  Icelandic = "icelandic",
+  /** Selects the Microsoft tokenizer for Indonesian. */
+  Indonesian = "indonesian",
+  /** Selects the Microsoft tokenizer for Italian. */
+  Italian = "italian",
+  /** Selects the Microsoft tokenizer for Japanese. */
+  Japanese = "japanese",
+  /** Selects the Microsoft tokenizer for Kannada. */
+  Kannada = "kannada",
+  /** Selects the Microsoft tokenizer for Korean. */
+  Korean = "korean",
+  /** Selects the Microsoft tokenizer for Malay. */
+  Malay = "malay",
+  /** Selects the Microsoft tokenizer for Malayalam. */
+  Malayalam = "malayalam",
+  /** Selects the Microsoft tokenizer for Marathi. */
+  Marathi = "marathi",
+  /** Selects the Microsoft tokenizer for Norwegian (Bokmål). */
+  NorwegianBokmaal = "norwegianBokmaal",
+  /** Selects the Microsoft tokenizer for Polish. */
+  Polish = "polish",
+  /** Selects the Microsoft tokenizer for Portuguese. */
+  Portuguese = "portuguese",
+  /** Selects the Microsoft tokenizer for Portuguese (Brazil). */
+  PortugueseBrazilian = "portugueseBrazilian",
+  /** Selects the Microsoft tokenizer for Punjabi. */
+  Punjabi = "punjabi",
+  /** Selects the Microsoft tokenizer for Romanian. */
+  Romanian = "romanian",
+  /** Selects the Microsoft tokenizer for Russian. */
+  Russian = "russian",
+  /** Selects the Microsoft tokenizer for Serbian (Cyrillic). */
+  SerbianCyrillic = "serbianCyrillic",
+  /** Selects the Microsoft tokenizer for Serbian (Latin). */
+  SerbianLatin = "serbianLatin",
+  /** Selects the Microsoft tokenizer for Slovenian. */
+  Slovenian = "slovenian",
+  /** Selects the Microsoft tokenizer for Spanish. */
+  Spanish = "spanish",
+  /** Selects the Microsoft tokenizer for Swedish. */
+  Swedish = "swedish",
+  /** Selects the Microsoft tokenizer for Tamil. */
+  Tamil = "tamil",
+  /** Selects the Microsoft tokenizer for Telugu. */
+  Telugu = "telugu",
+  /** Selects the Microsoft tokenizer for Thai. */
+  Thai = "thai",
+  /** Selects the Microsoft tokenizer for Ukrainian. */
+  Ukrainian = "ukrainian",
+  /** Selects the Microsoft tokenizer for Urdu. */
+  Urdu = "urdu",
+  /** Selects the Microsoft tokenizer for Vietnamese. */
+  Vietnamese = "vietnamese",
+}
+
+/**
+ * Lists the languages supported by the Microsoft language tokenizer. \
+ * {@link KnownMicrosoftTokenizerLanguage} can be used interchangeably with MicrosoftTokenizerLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **bangla**: Selects the Microsoft tokenizer for Bangla. \
+ * **bulgarian**: Selects the Microsoft tokenizer for Bulgarian. \
+ * **catalan**: Selects the Microsoft tokenizer for Catalan. \
+ * **chineseSimplified**: Selects the Microsoft tokenizer for Chinese (Simplified). \
+ * **chineseTraditional**: Selects the Microsoft tokenizer for Chinese (Traditional). \
+ * **croatian**: Selects the Microsoft tokenizer for Croatian. \
+ * **czech**: Selects the Microsoft tokenizer for Czech. \
+ * **danish**: Selects the Microsoft tokenizer for Danish. \
+ * **dutch**: Selects the Microsoft tokenizer for Dutch. \
+ * **english**: Selects the Microsoft tokenizer for English. \
+ * **french**: Selects the Microsoft tokenizer for French. \
+ * **german**: Selects the Microsoft tokenizer for German. \
+ * **greek**: Selects the Microsoft tokenizer for Greek. \
+ * **gujarati**: Selects the Microsoft tokenizer for Gujarati. \
+ * **hindi**: Selects the Microsoft tokenizer for Hindi. \
+ * **icelandic**: Selects the Microsoft tokenizer for Icelandic. \
+ * **indonesian**: Selects the Microsoft tokenizer for Indonesian. \
+ * **italian**: Selects the Microsoft tokenizer for Italian. \
+ * **japanese**: Selects the Microsoft tokenizer for Japanese. \
+ * **kannada**: Selects the Microsoft tokenizer for Kannada. \
+ * **korean**: Selects the Microsoft tokenizer for Korean. \
+ * **malay**: Selects the Microsoft tokenizer for Malay. \
+ * **malayalam**: Selects the Microsoft tokenizer for Malayalam. \
+ * **marathi**: Selects the Microsoft tokenizer for Marathi. \
+ * **norwegianBokmaal**: Selects the Microsoft tokenizer for Norwegian (Bokmål). \
+ * **polish**: Selects the Microsoft tokenizer for Polish. \
+ * **portuguese**: Selects the Microsoft tokenizer for Portuguese. \
+ * **portugueseBrazilian**: Selects the Microsoft tokenizer for Portuguese (Brazil). \
+ * **punjabi**: Selects the Microsoft tokenizer for Punjabi. \
+ * **romanian**: Selects the Microsoft tokenizer for Romanian. \
+ * **russian**: Selects the Microsoft tokenizer for Russian. \
+ * **serbianCyrillic**: Selects the Microsoft tokenizer for Serbian (Cyrillic). \
+ * **serbianLatin**: Selects the Microsoft tokenizer for Serbian (Latin). \
+ * **slovenian**: Selects the Microsoft tokenizer for Slovenian. \
+ * **spanish**: Selects the Microsoft tokenizer for Spanish. \
+ * **swedish**: Selects the Microsoft tokenizer for Swedish. \
+ * **tamil**: Selects the Microsoft tokenizer for Tamil. \
+ * **telugu**: Selects the Microsoft tokenizer for Telugu. \
+ * **thai**: Selects the Microsoft tokenizer for Thai. \
+ * **ukrainian**: Selects the Microsoft tokenizer for Ukrainian. \
+ * **urdu**: Selects the Microsoft tokenizer for Urdu. \
+ * **vietnamese**: Selects the Microsoft tokenizer for Vietnamese.
+ */
+export type MicrosoftTokenizerLanguage = string;
+
+/** Divides text using language-specific rules and reduces words to their base forms. */
+export interface MicrosoftLanguageStemmingTokenizer extends LexicalTokenizer {
+  /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
+  maxTokenLength?: number;
+  /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */
+  isSearchTokenizer?: boolean;
+  /** The language to use. The default is English. */
+  language?: MicrosoftStemmingTokenizerLanguage;
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer";
+}
+
+export function microsoftLanguageStemmingTokenizerSerializer(
+  item: MicrosoftLanguageStemmingTokenizer,
+): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    isSearchTokenizer: item["isSearchTokenizer"],
+    language: item["language"],
+  };
+}
+
+export function microsoftLanguageStemmingTokenizerDeserializer(
+  item: any,
+): MicrosoftLanguageStemmingTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+    isSearchTokenizer: item["isSearchTokenizer"],
+    language: item["language"],
+  };
+}
+
+/** Lists the languages supported by the Microsoft language stemming tokenizer. */
+export enum KnownMicrosoftStemmingTokenizerLanguage {
+  /** Selects the Microsoft stemming tokenizer for Arabic. */
+  Arabic = "arabic",
+  /** Selects the Microsoft stemming tokenizer for Bangla. */
+  Bangla = "bangla",
+  /** Selects the Microsoft stemming tokenizer for Bulgarian. */
+  Bulgarian = "bulgarian",
+  /** Selects the Microsoft stemming tokenizer for Catalan. */
+  Catalan = "catalan",
+  /** Selects the Microsoft stemming tokenizer for Croatian. */
+  Croatian = "croatian",
+  /** Selects the Microsoft stemming tokenizer for Czech. */
+  Czech = "czech",
+  /** Selects the Microsoft stemming tokenizer for Danish. */
+  Danish = "danish",
+  /** Selects the Microsoft stemming tokenizer for Dutch. */
+  Dutch = "dutch",
+  /** Selects the Microsoft stemming tokenizer for English. */
+  English = "english",
+  /** Selects the Microsoft stemming tokenizer for Estonian. */
+  Estonian = "estonian",
+  /** Selects the Microsoft stemming tokenizer for Finnish. */
+  Finnish = "finnish",
+  /** Selects the Microsoft stemming tokenizer for French. */
+  French = "french",
+  /** Selects the Microsoft stemming tokenizer for German. */
+  German = "german",
+  /** Selects the Microsoft stemming tokenizer for Greek. */
+  Greek = "greek",
+  /** Selects the Microsoft stemming tokenizer for Gujarati. */
+  Gujarati = "gujarati",
+  /** Selects the Microsoft stemming tokenizer for Hebrew. */
+  Hebrew = "hebrew",
+  /** Selects the Microsoft stemming tokenizer for Hindi. */
+  Hindi = "hindi",
+  /** Selects the Microsoft stemming tokenizer for Hungarian. */
+  Hungarian = "hungarian",
+  /** Selects the Microsoft stemming tokenizer for Icelandic. */
+  Icelandic = "icelandic",
+  /** Selects the Microsoft stemming tokenizer for Indonesian. */
+  Indonesian = "indonesian",
+  /** Selects the Microsoft stemming tokenizer for Italian. */
+  Italian = "italian",
+  /** Selects the Microsoft stemming tokenizer for Kannada. */
+  Kannada = "kannada",
+  /** Selects the Microsoft stemming tokenizer for Latvian. */
+  Latvian = "latvian",
+  /** Selects the Microsoft stemming tokenizer for Lithuanian. */
+  Lithuanian = "lithuanian",
+  /** Selects the Microsoft stemming tokenizer for Malay. */
+  Malay = "malay",
+  /** Selects the Microsoft stemming tokenizer for Malayalam. */
+  Malayalam = "malayalam",
+  /** Selects the Microsoft stemming tokenizer for Marathi. */
+  Marathi = "marathi",
+  /** Selects the Microsoft stemming tokenizer for Norwegian (Bokmål). */
+  NorwegianBokmaal = "norwegianBokmaal",
+  /** Selects the Microsoft stemming tokenizer for Polish. */
+  Polish = "polish",
+  /** Selects the Microsoft stemming tokenizer for Portuguese. */
+  Portuguese = "portuguese",
+  /** Selects the Microsoft stemming tokenizer for Portuguese (Brazil). */
+  PortugueseBrazilian = "portugueseBrazilian",
+  /** Selects the Microsoft stemming tokenizer for Punjabi. */
+  Punjabi = "punjabi",
+  /** Selects the Microsoft stemming tokenizer for Romanian. */
+  Romanian = "romanian",
+  /** Selects the Microsoft stemming tokenizer for Russian. */
+  Russian = "russian",
+  /** Selects the Microsoft stemming tokenizer for Serbian (Cyrillic). */
+  SerbianCyrillic = "serbianCyrillic",
+  /** Selects the Microsoft stemming tokenizer for Serbian (Latin). */
+  SerbianLatin = "serbianLatin",
+  /** Selects the Microsoft stemming tokenizer for Slovak. */
+  Slovak = "slovak",
+  /** Selects the Microsoft stemming tokenizer for Slovenian. */
+  Slovenian = "slovenian",
+  /** Selects the Microsoft stemming tokenizer for Spanish. */
+  Spanish = "spanish",
+  /** Selects the Microsoft stemming tokenizer for Swedish. */
+  Swedish = "swedish",
+  /** Selects the Microsoft stemming tokenizer for Tamil. */
+  Tamil = "tamil",
+  /** Selects the Microsoft stemming tokenizer for Telugu. */
+  Telugu = "telugu",
+  /** Selects the Microsoft stemming tokenizer for Turkish. */
+  Turkish = "turkish",
+  /** Selects the Microsoft stemming tokenizer for Ukrainian. */
+  Ukrainian = "ukrainian",
+  /** Selects the Microsoft stemming tokenizer for Urdu. */
+  Urdu = "urdu",
+}
+
+/**
+ * Lists the languages supported by the Microsoft language stemming tokenizer. \
+ * {@link KnownMicrosoftStemmingTokenizerLanguage} can be used interchangeably with MicrosoftStemmingTokenizerLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **arabic**: Selects the Microsoft stemming tokenizer for Arabic. \
+ * **bangla**: Selects the Microsoft stemming tokenizer for Bangla. \
+ * **bulgarian**: Selects the Microsoft stemming tokenizer for Bulgarian. \
+ * **catalan**: Selects the Microsoft stemming tokenizer for Catalan. \
+ * **croatian**: Selects the Microsoft stemming tokenizer for Croatian. \
+ * **czech**: Selects the Microsoft stemming tokenizer for Czech. \
+ * **danish**: Selects the Microsoft stemming tokenizer for Danish. \
+ * **dutch**: Selects the Microsoft stemming tokenizer for Dutch. \
+ * **english**: Selects the Microsoft stemming tokenizer for English. \
+ * **estonian**: Selects the Microsoft stemming tokenizer for Estonian. \
+ * **finnish**: Selects the Microsoft stemming tokenizer for Finnish. \
+ * **french**: Selects the Microsoft stemming tokenizer for French. \
+ * **german**: Selects the Microsoft stemming tokenizer for German. \
+ * **greek**: Selects the Microsoft stemming tokenizer for Greek. \
+ * **gujarati**: Selects the Microsoft stemming tokenizer for Gujarati. \
+ * **hebrew**: Selects the Microsoft stemming tokenizer for Hebrew. \
+ * **hindi**: Selects the Microsoft stemming tokenizer for Hindi. \
+ * **hungarian**: Selects the Microsoft stemming tokenizer for Hungarian. \
+ * **icelandic**: Selects the Microsoft stemming tokenizer for Icelandic. \
+ * **indonesian**: Selects the Microsoft stemming tokenizer for Indonesian. \
+ * **italian**: Selects the Microsoft stemming tokenizer for Italian. \
+ * **kannada**: Selects the Microsoft stemming tokenizer for Kannada. \
+ * **latvian**: Selects the Microsoft stemming tokenizer for Latvian. \
+ * **lithuanian**: Selects the Microsoft stemming tokenizer for Lithuanian. \
+ * **malay**: Selects the Microsoft stemming tokenizer for Malay. \
+ * **malayalam**: Selects the Microsoft stemming tokenizer for Malayalam. \
+ * **marathi**: Selects the Microsoft stemming tokenizer for Marathi. \
+ * **norwegianBokmaal**: Selects the Microsoft stemming tokenizer for Norwegian (Bokmål). \
+ * **polish**: Selects the Microsoft stemming tokenizer for Polish. \
+ * **portuguese**: Selects the Microsoft stemming tokenizer for Portuguese. \
+ * **portugueseBrazilian**: Selects the Microsoft stemming tokenizer for Portuguese (Brazil). \
+ * **punjabi**: Selects the Microsoft stemming tokenizer for Punjabi. \
+ * **romanian**: Selects the Microsoft stemming tokenizer for Romanian. \
+ * **russian**: Selects the Microsoft stemming tokenizer for Russian. \
+ * **serbianCyrillic**: Selects the Microsoft stemming tokenizer for Serbian (Cyrillic). \
+ * **serbianLatin**: Selects the Microsoft stemming tokenizer for Serbian (Latin). \
+ * **slovak**: Selects the Microsoft stemming tokenizer for Slovak. \
+ * **slovenian**: Selects the Microsoft stemming tokenizer for Slovenian. \
+ * **spanish**: Selects the Microsoft stemming tokenizer for Spanish. \
+ * **swedish**: Selects the Microsoft stemming tokenizer for Swedish. \
+ * **tamil**: Selects the Microsoft stemming tokenizer for Tamil. \
+ * **telugu**: Selects the Microsoft stemming tokenizer for Telugu. \
+ * **turkish**: Selects the Microsoft stemming tokenizer for Turkish. \
+ * **ukrainian**: Selects the Microsoft stemming tokenizer for Ukrainian. \
+ * **urdu**: Selects the Microsoft stemming tokenizer for Urdu.
+ */
+export type MicrosoftStemmingTokenizerLanguage = string;
+
+/** Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */
+export interface NGramTokenizer extends LexicalTokenizer {
+  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+  /** Character classes to keep in the tokens. */
+  tokenChars?: TokenCharacterKind[];
+  /** A URI fragment specifying the type of tokenizer. */
+  odatatype: "#Microsoft.Azure.Search.NGramTokenizer";
+}
+
+export function nGramTokenizerSerializer(item: NGramTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    tokenChars: !item["tokenChars"]
+      ? item["tokenChars"]
+      : item["tokenChars"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function nGramTokenizerDeserializer(item: any): NGramTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    tokenChars: !item["tokenChars"]
+      ? item["tokenChars"]
+      : item["tokenChars"].map((p: any) => {
+          return p;
+        }),
+  };
+}
*/ + odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2"; +} + +export function pathHierarchyTokenizerSerializer(item: PathHierarchyTokenizer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + delimiter: item["delimiter"], + replacement: item["replacement"], + maxTokenLength: item["maxTokenLength"], + reverse: item["reverseTokenOrder"], + skip: item["numberOfTokensToSkip"], + }; +} + +export function pathHierarchyTokenizerDeserializer(item: any): PathHierarchyTokenizer { + return { + odatatype: item["@odata.type"], + name: item["name"], + delimiter: item["delimiter"], + replacement: item["replacement"], + maxTokenLength: item["maxTokenLength"], + reverseTokenOrder: item["reverse"], + numberOfTokensToSkip: item["skip"], + }; +} + +/** Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. */ +export interface PatternTokenizer extends LexicalTokenizer { + /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */ + pattern?: string; + /** Regular expression flags. */ + flags?: RegexFlags; + /** The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. */ + group?: number; + /** A URI fragment specifying the type of tokenizer. */ + odatatype: "#Microsoft.Azure.Search.PatternTokenizer"; +} + +export function patternTokenizerSerializer(item: PatternTokenizer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + pattern: item["pattern"], + flags: item["flags"], + group: item["group"], + }; +} + +export function patternTokenizerDeserializer(item: any): PatternTokenizer { + return { + odatatype: item["@odata.type"], + name: item["name"], + pattern: item["pattern"], + flags: item["flags"], + group: item["group"], + }; +} + +/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */ +export interface LuceneStandardTokenizer extends LexicalTokenizer { + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; + /** A URI fragment specifying the type of tokenizer. */ + odatatype: "#Microsoft.Azure.Search.StandardTokenizerV2"; +} + +export function luceneStandardTokenizerSerializer(item: LuceneStandardTokenizer): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + maxTokenLength: item["maxTokenLength"], + }; +} + +export function luceneStandardTokenizerDeserializer(item: any): LuceneStandardTokenizer { + return { + odatatype: item["@odata.type"], + name: item["name"], + maxTokenLength: item["maxTokenLength"], + }; +} + +/** Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. */ +export interface UaxUrlEmailTokenizer extends LexicalTokenizer { + /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */ + maxTokenLength?: number; + /** A URI fragment specifying the type of tokenizer. 
+ */
+  odatatype: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer";
+}
+
+export function uaxUrlEmailTokenizerSerializer(item: UaxUrlEmailTokenizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+export function uaxUrlEmailTokenizerDeserializer(item: any): UaxUrlEmailTokenizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenLength: item["maxTokenLength"],
+  };
+}
+
+export function tokenFilterUnionArraySerializer(result: Array<TokenFilterUnion>): any[] {
+  return result.map((item) => {
+    return tokenFilterUnionSerializer(item);
+  });
+}
+
+export function tokenFilterUnionArrayDeserializer(result: Array<TokenFilterUnion>): any[] {
+  return result.map((item) => {
+    return tokenFilterUnionDeserializer(item);
+  });
+}
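+
+// NOTE (editor's illustrative sketch, not generated code): the pair above
+// serializes a heterogeneous array of token filters by dispatching on each
+// element's "@odata.type" discriminator. Assuming this module's exports, a
+// round trip would look like:
+//
+//   const filters: TokenFilterUnion[] = [
+//     { odatatype: "#Microsoft.Azure.Search.TruncateTokenFilter", name: "trunc300", length: 300 },
+//     { odatatype: "#Microsoft.Azure.Search.LengthTokenFilter", name: "len", minLength: 2, maxLength: 100 },
+//   ];
+//   const wire = tokenFilterUnionArraySerializer(filters);  // wire shape, e.g. { "@odata.type": ..., min: 2, max: 100 }
+//   const back = tokenFilterUnionArrayDeserializer(wire);   // client shape again
+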
+/** Base type for token filters. */
+export interface TokenFilter {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.AsciiFoldingTokenFilter, #Microsoft.Azure.Search.CjkBigramTokenFilter, #Microsoft.Azure.Search.CommonGramTokenFilter, #Microsoft.Azure.Search.DictionaryDecompounderTokenFilter, #Microsoft.Azure.Search.EdgeNGramTokenFilterV2, #Microsoft.Azure.Search.ElisionTokenFilter, #Microsoft.Azure.Search.KeepTokenFilter, #Microsoft.Azure.Search.KeywordMarkerTokenFilter, #Microsoft.Azure.Search.LengthTokenFilter, #Microsoft.Azure.Search.LimitTokenFilter, #Microsoft.Azure.Search.NGramTokenFilterV2, #Microsoft.Azure.Search.PatternCaptureTokenFilter, #Microsoft.Azure.Search.PatternReplaceTokenFilter, #Microsoft.Azure.Search.PhoneticTokenFilter, #Microsoft.Azure.Search.ShingleTokenFilter, #Microsoft.Azure.Search.SnowballTokenFilter, #Microsoft.Azure.Search.StemmerTokenFilter, #Microsoft.Azure.Search.StemmerOverrideTokenFilter, #Microsoft.Azure.Search.StopwordsTokenFilter, #Microsoft.Azure.Search.SynonymTokenFilter, #Microsoft.Azure.Search.TruncateTokenFilter, #Microsoft.Azure.Search.UniqueTokenFilter, #Microsoft.Azure.Search.WordDelimiterTokenFilter */
+  odatatype: string;
+  /** The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+
+export function tokenFilterSerializer(item: TokenFilter): any {
+  return { "@odata.type": item["odatatype"], name: item["name"] };
+}
+
+export function tokenFilterDeserializer(item: any): TokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+  };
+}
+
+/** Alias for TokenFilterUnion */
+export type TokenFilterUnion =
+  | AsciiFoldingTokenFilter
+  | CjkBigramTokenFilter
+  | CommonGramTokenFilter
+  | DictionaryDecompounderTokenFilter
+  | EdgeNGramTokenFilter
+  | ElisionTokenFilter
+  | KeepTokenFilter
+  | KeywordMarkerTokenFilter
+  | LengthTokenFilter
+  | LimitTokenFilter
+  | NGramTokenFilter
+  | PatternCaptureTokenFilter
+  | PatternReplaceTokenFilter
+  | PhoneticTokenFilter
+  | ShingleTokenFilter
+  | SnowballTokenFilter
+  | StemmerTokenFilter
+  | StemmerOverrideTokenFilter
+  | StopwordsTokenFilter
+  | SynonymTokenFilter
+  | TruncateTokenFilter
+  | UniqueTokenFilter
+  | WordDelimiterTokenFilter
+  | TokenFilter;
+
+export function tokenFilterUnionSerializer(item: TokenFilterUnion): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.AsciiFoldingTokenFilter":
+      return asciiFoldingTokenFilterSerializer(item as AsciiFoldingTokenFilter);
+
+    case "#Microsoft.Azure.Search.CjkBigramTokenFilter":
+      return cjkBigramTokenFilterSerializer(item as CjkBigramTokenFilter);
+
+    case "#Microsoft.Azure.Search.CommonGramTokenFilter":
+      return commonGramTokenFilterSerializer(item as CommonGramTokenFilter);
+
+    case "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter":
+      return dictionaryDecompounderTokenFilterSerializer(item as DictionaryDecompounderTokenFilter);
+
+    case "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2":
+      return edgeNGramTokenFilterSerializer(item as EdgeNGramTokenFilter);
+
+    case "#Microsoft.Azure.Search.ElisionTokenFilter":
+      return elisionTokenFilterSerializer(item as ElisionTokenFilter);
+
+    case "#Microsoft.Azure.Search.KeepTokenFilter":
+      return keepTokenFilterSerializer(item as KeepTokenFilter);
+
+    case "#Microsoft.Azure.Search.KeywordMarkerTokenFilter":
+      return keywordMarkerTokenFilterSerializer(item as KeywordMarkerTokenFilter);
+
+    case "#Microsoft.Azure.Search.LengthTokenFilter":
+      return lengthTokenFilterSerializer(item as LengthTokenFilter);
+
+    case "#Microsoft.Azure.Search.LimitTokenFilter":
+      return limitTokenFilterSerializer(item as LimitTokenFilter);
+
+    case "#Microsoft.Azure.Search.NGramTokenFilterV2":
+      return nGramTokenFilterSerializer(item as NGramTokenFilter);
+
+    case "#Microsoft.Azure.Search.PatternCaptureTokenFilter":
+      return patternCaptureTokenFilterSerializer(item as PatternCaptureTokenFilter);
+
+    case "#Microsoft.Azure.Search.PatternReplaceTokenFilter":
+      return patternReplaceTokenFilterSerializer(item as PatternReplaceTokenFilter);
+
+    case "#Microsoft.Azure.Search.PhoneticTokenFilter":
+      return phoneticTokenFilterSerializer(item as PhoneticTokenFilter);
+
+    case "#Microsoft.Azure.Search.ShingleTokenFilter":
+      return shingleTokenFilterSerializer(item as ShingleTokenFilter);
+
+    case "#Microsoft.Azure.Search.SnowballTokenFilter":
+      return snowballTokenFilterSerializer(item as SnowballTokenFilter);
+
+    case "#Microsoft.Azure.Search.StemmerTokenFilter":
+      return stemmerTokenFilterSerializer(item as StemmerTokenFilter);
+
+    case "#Microsoft.Azure.Search.StemmerOverrideTokenFilter":
+      return stemmerOverrideTokenFilterSerializer(item as StemmerOverrideTokenFilter);
+
+    case "#Microsoft.Azure.Search.StopwordsTokenFilter":
+      return stopwordsTokenFilterSerializer(item as StopwordsTokenFilter);
+
+ case "#Microsoft.Azure.Search.SynonymTokenFilter": + return synonymTokenFilterSerializer(item as SynonymTokenFilter); + + case "#Microsoft.Azure.Search.TruncateTokenFilter": + return truncateTokenFilterSerializer(item as TruncateTokenFilter); + + case "#Microsoft.Azure.Search.UniqueTokenFilter": + return uniqueTokenFilterSerializer(item as UniqueTokenFilter); + + case "#Microsoft.Azure.Search.WordDelimiterTokenFilter": + return wordDelimiterTokenFilterSerializer(item as WordDelimiterTokenFilter); + + default: + return tokenFilterSerializer(item); + } +} + +export function tokenFilterUnionDeserializer(item: any): TokenFilterUnion { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.AsciiFoldingTokenFilter": + return asciiFoldingTokenFilterDeserializer(item as AsciiFoldingTokenFilter); + + case "#Microsoft.Azure.Search.CjkBigramTokenFilter": + return cjkBigramTokenFilterDeserializer(item as CjkBigramTokenFilter); + + case "#Microsoft.Azure.Search.CommonGramTokenFilter": + return commonGramTokenFilterDeserializer(item as CommonGramTokenFilter); + + case "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter": + return dictionaryDecompounderTokenFilterDeserializer( + item as DictionaryDecompounderTokenFilter, + ); + + case "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2": + return edgeNGramTokenFilterDeserializer(item as EdgeNGramTokenFilter); + + case "#Microsoft.Azure.Search.ElisionTokenFilter": + return elisionTokenFilterDeserializer(item as ElisionTokenFilter); + + case "#Microsoft.Azure.Search.KeepTokenFilter": + return keepTokenFilterDeserializer(item as KeepTokenFilter); + + case "#Microsoft.Azure.Search.KeywordMarkerTokenFilter": + return keywordMarkerTokenFilterDeserializer(item as KeywordMarkerTokenFilter); + + case "#Microsoft.Azure.Search.LengthTokenFilter": + return lengthTokenFilterDeserializer(item as LengthTokenFilter); + + case "#Microsoft.Azure.Search.LimitTokenFilter": + return limitTokenFilterDeserializer(item as LimitTokenFilter); + + case "#Microsoft.Azure.Search.NGramTokenFilterV2": + return nGramTokenFilterDeserializer(item as NGramTokenFilter); + + case "#Microsoft.Azure.Search.PatternCaptureTokenFilter": + return patternCaptureTokenFilterDeserializer(item as PatternCaptureTokenFilter); + + case "#Microsoft.Azure.Search.PatternReplaceTokenFilter": + return patternReplaceTokenFilterDeserializer(item as PatternReplaceTokenFilter); + + case "#Microsoft.Azure.Search.PhoneticTokenFilter": + return phoneticTokenFilterDeserializer(item as PhoneticTokenFilter); + + case "#Microsoft.Azure.Search.ShingleTokenFilter": + return shingleTokenFilterDeserializer(item as ShingleTokenFilter); + + case "#Microsoft.Azure.Search.SnowballTokenFilter": + return snowballTokenFilterDeserializer(item as SnowballTokenFilter); + + case "#Microsoft.Azure.Search.StemmerTokenFilter": + return stemmerTokenFilterDeserializer(item as StemmerTokenFilter); + + case "#Microsoft.Azure.Search.StemmerOverrideTokenFilter": + return stemmerOverrideTokenFilterDeserializer(item as StemmerOverrideTokenFilter); + + case "#Microsoft.Azure.Search.StopwordsTokenFilter": + return stopwordsTokenFilterDeserializer(item as StopwordsTokenFilter); + + case "#Microsoft.Azure.Search.SynonymTokenFilter": + return synonymTokenFilterDeserializer(item as SynonymTokenFilter); + + case "#Microsoft.Azure.Search.TruncateTokenFilter": + return truncateTokenFilterDeserializer(item as TruncateTokenFilter); + + case "#Microsoft.Azure.Search.UniqueTokenFilter": + return uniqueTokenFilterDeserializer(item as 
UniqueTokenFilter); + + case "#Microsoft.Azure.Search.WordDelimiterTokenFilter": + return wordDelimiterTokenFilterDeserializer(item as WordDelimiterTokenFilter); + + default: + return tokenFilterDeserializer(item); + } +} + +/** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. */ +export interface AsciiFoldingTokenFilter extends TokenFilter { + /** A value indicating whether the original token will be kept. Default is false. */ + preserveOriginal?: boolean; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter"; +} + +export function asciiFoldingTokenFilterSerializer(item: AsciiFoldingTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + preserveOriginal: item["preserveOriginal"], + }; +} + +export function asciiFoldingTokenFilterDeserializer(item: any): AsciiFoldingTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + preserveOriginal: item["preserveOriginal"], + }; +} + +/** Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. */ +export interface CjkBigramTokenFilter extends TokenFilter { + /** The scripts to ignore. */ + ignoreScripts?: CjkBigramTokenFilterScripts[]; + /** A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. */ + outputUnigrams?: boolean; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.CjkBigramTokenFilter"; +} + +export function cjkBigramTokenFilterSerializer(item: CjkBigramTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + ignoreScripts: !item["ignoreScripts"] + ? item["ignoreScripts"] + : item["ignoreScripts"].map((p: any) => { + return p; + }), + outputUnigrams: item["outputUnigrams"], + }; +} + +export function cjkBigramTokenFilterDeserializer(item: any): CjkBigramTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + ignoreScripts: !item["ignoreScripts"] + ? item["ignoreScripts"] + : item["ignoreScripts"].map((p: any) => { + return p; + }), + outputUnigrams: item["outputUnigrams"], + }; +} + +/** Scripts that can be ignored by CjkBigramTokenFilter. */ +export enum KnownCjkBigramTokenFilterScripts { + /** Ignore Han script when forming bigrams of CJK terms. */ + Han = "han", + /** Ignore Hiragana script when forming bigrams of CJK terms. */ + Hiragana = "hiragana", + /** Ignore Katakana script when forming bigrams of CJK terms. */ + Katakana = "katakana", + /** Ignore Hangul script when forming bigrams of CJK terms. */ + Hangul = "hangul", +} + +/** + * Scripts that can be ignored by CjkBigramTokenFilter. \ + * {@link KnownCjkBigramTokenFilterScripts} can be used interchangeably with CjkBigramTokenFilterScripts, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **han**: Ignore Han script when forming bigrams of CJK terms. \ + * **hiragana**: Ignore Hiragana script when forming bigrams of CJK terms. \ + * **katakana**: Ignore Katakana script when forming bigrams of CJK terms. \ + * **hangul**: Ignore Hangul script when forming bigrams of CJK terms. 
+ */ +export type CjkBigramTokenFilterScripts = string; + +/** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. */ +export interface CommonGramTokenFilter extends TokenFilter { + /** The set of common words. */ + commonWords: string[]; + /** A value indicating whether common words matching will be case insensitive. Default is false. */ + ignoreCase?: boolean; + /** A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false. */ + useQueryMode?: boolean; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter"; +} + +export function commonGramTokenFilterSerializer(item: CommonGramTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + commonWords: item["commonWords"].map((p: any) => { + return p; + }), + ignoreCase: item["ignoreCase"], + queryMode: item["useQueryMode"], + }; +} + +export function commonGramTokenFilterDeserializer(item: any): CommonGramTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + commonWords: item["commonWords"].map((p: any) => { + return p; + }), + ignoreCase: item["ignoreCase"], + useQueryMode: item["queryMode"], + }; +} + +/** Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. */ +export interface DictionaryDecompounderTokenFilter extends TokenFilter { + /** The list of words to match against. */ + wordList: string[]; + /** The minimum word size. Only words longer than this get processed. Default is 5. Maximum is 300. */ + minWordSize?: number; + /** The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum is 300. */ + minSubwordSize?: number; + /** The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. */ + maxSubwordSize?: number; + /** A value indicating whether to add only the longest matching subword to the output. Default is false. */ + onlyLongestMatch?: boolean; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter"; +} + +export function dictionaryDecompounderTokenFilterSerializer( + item: DictionaryDecompounderTokenFilter, +): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + wordList: item["wordList"].map((p: any) => { + return p; + }), + minWordSize: item["minWordSize"], + minSubwordSize: item["minSubwordSize"], + maxSubwordSize: item["maxSubwordSize"], + onlyLongestMatch: item["onlyLongestMatch"], + }; +} + +export function dictionaryDecompounderTokenFilterDeserializer( + item: any, +): DictionaryDecompounderTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + wordList: item["wordList"].map((p: any) => { + return p; + }), + minWordSize: item["minWordSize"], + minSubwordSize: item["minSubwordSize"], + maxSubwordSize: item["maxSubwordSize"], + onlyLongestMatch: item["onlyLongestMatch"], + }; +} + +/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */ +export interface EdgeNGramTokenFilter extends TokenFilter { + /** The minimum n-gram length. Default is 1. Maximum is 300. 
+   Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+  /** Specifies which side of the input the n-gram should be generated from. Default is "front". */
+  side?: EdgeNGramTokenFilterSide;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2";
+}
+
+export function edgeNGramTokenFilterSerializer(item: EdgeNGramTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    side: item["side"],
+  };
+}
+
+export function edgeNGramTokenFilterDeserializer(item: any): EdgeNGramTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+    side: item["side"],
+  };
+}
+
+/** Specifies which side of the input an n-gram should be generated from. */
+export enum KnownEdgeNGramTokenFilterSide {
+  /** Specifies that the n-gram should be generated from the front of the input. */
+  Front = "front",
+  /** Specifies that the n-gram should be generated from the back of the input. */
+  Back = "back",
+}
+
+/**
+ * Specifies which side of the input an n-gram should be generated from. \
+ * {@link KnownEdgeNGramTokenFilterSide} can be used interchangeably with EdgeNGramTokenFilterSide,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **front**: Specifies that the n-gram should be generated from the front of the input. \
+ * **back**: Specifies that the n-gram should be generated from the back of the input.
+ */
+export type EdgeNGramTokenFilterSide = string;
+
+/** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene. */
+export interface ElisionTokenFilter extends TokenFilter {
+  /** The set of articles to remove. */
+  articles?: string[];
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter";
+}
+
+export function elisionTokenFilterSerializer(item: ElisionTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    articles: !item["articles"]
+      ? item["articles"]
+      : item["articles"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function elisionTokenFilterDeserializer(item: any): ElisionTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    articles: !item["articles"]
+      ? item["articles"]
+      : item["articles"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
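+// NOTE (editor's illustrative sketch, not generated code): using the elision
+// filter above, an analyzer configured with a hypothetical article list of
+// ["l", "m", "t"] would turn "l'avion" into "avion", matching the example in
+// the doc comment:
+//
+//   const elision: ElisionTokenFilter = {
+//     odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter",
+//     name: "frenchElision",
+//     articles: ["l", "m", "t"],
+//   };
+//   elisionTokenFilterSerializer(elision);
+//   // => { "@odata.type": "#Microsoft.Azure.Search.ElisionTokenFilter", name: "frenchElision", articles: ["l", "m", "t"] }
+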
+/** A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. */
+export interface KeepTokenFilter extends TokenFilter {
+  /** The list of words to keep. */
+  keepWords: string[];
+  /** A value indicating whether to lower case all words first. Default is false. */
+  lowerCaseKeepWords?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.KeepTokenFilter";
+}
+
+export function keepTokenFilterSerializer(item: KeepTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    keepWords: item["keepWords"].map((p: any) => {
+      return p;
+    }),
+    keepWordsCase: item["lowerCaseKeepWords"],
+  };
+}
+
+export function keepTokenFilterDeserializer(item: any): KeepTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    keepWords: item["keepWords"].map((p: any) => {
+      return p;
+    }),
+    lowerCaseKeepWords: item["keepWordsCase"],
+  };
+}
+
+/** Marks terms as keywords. This token filter is implemented using Apache Lucene. */
+export interface KeywordMarkerTokenFilter extends TokenFilter {
+  /** A list of words to mark as keywords. */
+  keywords: string[];
+  /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */
+  ignoreCase?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter";
+}
+
+export function keywordMarkerTokenFilterSerializer(item: KeywordMarkerTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    keywords: item["keywords"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+  };
+}
+
+export function keywordMarkerTokenFilterDeserializer(item: any): KeywordMarkerTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    keywords: item["keywords"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+  };
+}
+
+/** Removes words that are too long or too short. This token filter is implemented using Apache Lucene. */
+export interface LengthTokenFilter extends TokenFilter {
+  /** The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max. */
+  minLength?: number;
+  /** The maximum length in characters. Default and maximum is 300. */
+  maxLength?: number;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.LengthTokenFilter";
+}
+
+export function lengthTokenFilterSerializer(item: LengthTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    min: item["minLength"],
+    max: item["maxLength"],
+  };
+}
+
+export function lengthTokenFilterDeserializer(item: any): LengthTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    minLength: item["min"],
+    maxLength: item["max"],
+  };
+}
+
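+// NOTE (editor's illustrative sketch, not generated code): several of these
+// serializers rename client properties to their wire-format names.
+// LengthTokenFilter is a compact example: minLength/maxLength on the client
+// map to min/max on the wire, per the serializer pair directly above:
+//
+//   lengthTokenFilterSerializer({
+//     odatatype: "#Microsoft.Azure.Search.LengthTokenFilter",
+//     name: "len",
+//     minLength: 2,
+//     maxLength: 100,
+//   });
+//   // => { "@odata.type": "#Microsoft.Azure.Search.LengthTokenFilter", name: "len", min: 2, max: 100 }
+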
+/** Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. */
+export interface LimitTokenFilter extends TokenFilter {
+  /** The maximum number of tokens to produce. Default is 1. */
+  maxTokenCount?: number;
+  /** A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false. */
+  consumeAllTokens?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.LimitTokenFilter";
+}
+
+export function limitTokenFilterSerializer(item: LimitTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    maxTokenCount: item["maxTokenCount"],
+    consumeAllTokens: item["consumeAllTokens"],
+  };
+}
+
+export function limitTokenFilterDeserializer(item: any): LimitTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    maxTokenCount: item["maxTokenCount"],
+    consumeAllTokens: item["consumeAllTokens"],
+  };
+}
+
+/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */
+export interface NGramTokenFilter extends TokenFilter {
+  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.NGramTokenFilterV2";
+}
+
+export function nGramTokenFilterSerializer(item: NGramTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+  };
+}
+
+export function nGramTokenFilterDeserializer(item: any): NGramTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    minGram: item["minGram"],
+    maxGram: item["maxGram"],
+  };
+}
+
+/** Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. */
+export interface PatternCaptureTokenFilter extends TokenFilter {
+  /** A list of patterns to match against each token. */
+  patterns: string[];
+  /** A value indicating whether to return the original token even if one of the patterns matches. Default is true. */
+  preserveOriginal?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter";
+}
+
+export function patternCaptureTokenFilterSerializer(item: PatternCaptureTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    patterns: item["patterns"].map((p: any) => {
+      return p;
+    }),
+    preserveOriginal: item["preserveOriginal"],
+  };
+}
+
+export function patternCaptureTokenFilterDeserializer(item: any): PatternCaptureTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    patterns: item["patterns"].map((p: any) => {
+      return p;
+    }),
+    preserveOriginal: item["preserveOriginal"],
+  };
+}
+
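+// NOTE (editor's illustrative sketch, not generated code): for the
+// pattern-replace filter defined just below, the documented example maps input
+// "aa bb aa bb" with pattern "(aa)\s+(bb)" and replacement "$1#$2" to
+// "aa#bb aa#bb". As a client-side configuration that would be:
+//
+//   const patternReplace: PatternReplaceTokenFilter = {
+//     odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter",
+//     name: "joinPairs",
+//     pattern: "(aa)\\s+(bb)",
+//     replacement: "$1#$2",
+//   };
+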
+/** A token filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene. */
+export interface PatternReplaceTokenFilter extends TokenFilter {
+  /** A regular expression pattern. */
+  pattern: string;
+  /** The replacement text. */
+  replacement: string;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter";
+}
+
+export function patternReplaceTokenFilterSerializer(item: PatternReplaceTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    pattern: item["pattern"],
+    replacement: item["replacement"],
+  };
+}
+
+export function patternReplaceTokenFilterDeserializer(item: any): PatternReplaceTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    pattern: item["pattern"],
+    replacement: item["replacement"],
+  };
+}
+
+/** Creates tokens for phonetic matches. This token filter is implemented using Apache Lucene. */
+export interface PhoneticTokenFilter extends TokenFilter {
+  /** The phonetic encoder to use. Default is "metaphone". */
+  encoder?: PhoneticEncoder;
+  /** A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. */
+  replaceOriginalTokens?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter";
+}
+
+export function phoneticTokenFilterSerializer(item: PhoneticTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    encoder: item["encoder"],
+    replace: item["replaceOriginalTokens"],
+  };
+}
+
+export function phoneticTokenFilterDeserializer(item: any): PhoneticTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    encoder: item["encoder"],
+    replaceOriginalTokens: item["replace"],
+  };
+}
+
+/** Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. */
+export enum KnownPhoneticEncoder {
+  /** Encodes a token into a Metaphone value. */
+  Metaphone = "metaphone",
+  /** Encodes a token into a double metaphone value. */
+  DoubleMetaphone = "doubleMetaphone",
+  /** Encodes a token into a Soundex value. */
+  Soundex = "soundex",
+  /** Encodes a token into a Refined Soundex value. */
+  RefinedSoundex = "refinedSoundex",
+  /** Encodes a token into a Caverphone 1.0 value. */
+  Caverphone1 = "caverphone1",
+  /** Encodes a token into a Caverphone 2.0 value. */
+  Caverphone2 = "caverphone2",
+  /** Encodes a token into a Cologne Phonetic value. */
+  Cologne = "cologne",
+  /** Encodes a token into a NYSIIS value. */
+  Nysiis = "nysiis",
+  /** Encodes a token using the Kölner Phonetik algorithm. */
+  KoelnerPhonetik = "koelnerPhonetik",
+  /** Encodes a token using the Haase refinement of the Kölner Phonetik algorithm. */
+  HaasePhonetik = "haasePhonetik",
+  /** Encodes a token into a Beider-Morse value. */
+  BeiderMorse = "beiderMorse",
+}
+
+/**
+ * Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. \
+ * {@link KnownPhoneticEncoder} can be used interchangeably with PhoneticEncoder,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **metaphone**: Encodes a token into a Metaphone value. \
+ * **doubleMetaphone**: Encodes a token into a double metaphone value. \
+ * **soundex**: Encodes a token into a Soundex value. \
+ * **refinedSoundex**: Encodes a token into a Refined Soundex value. \
+ * **caverphone1**: Encodes a token into a Caverphone 1.0 value. \
+ * **caverphone2**: Encodes a token into a Caverphone 2.0 value. \
+ * **cologne**: Encodes a token into a Cologne Phonetic value. \
+ * **nysiis**: Encodes a token into a NYSIIS value. \
+ * **koelnerPhonetik**: Encodes a token using the Kölner Phonetik algorithm.
\ + * **haasePhonetik**: Encodes a token using the Haase refinement of the Kölner Phonetik algorithm. \ + * **beiderMorse**: Encodes a token into a Beider-Morse value. + */ +export type PhoneticEncoder = string; + +/** Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. */ +export interface ShingleTokenFilter extends TokenFilter { + /** The maximum shingle size. Default and minimum value is 2. */ + maxShingleSize?: number; + /** The minimum shingle size. Default and minimum value is 2. Must be less than the value of maxShingleSize. */ + minShingleSize?: number; + /** A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. Default is true. */ + outputUnigrams?: boolean; + /** A value indicating whether to output unigrams for those times when no shingles are available. This property takes precedence when outputUnigrams is set to false. Default is false. */ + outputUnigramsIfNoShingles?: boolean; + /** The string to use when joining adjacent tokens to form a shingle. Default is a single space (" "). */ + tokenSeparator?: string; + /** The string to insert for each position at which there is no token. Default is an underscore ("_"). */ + filterToken?: string; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter"; +} + +export function shingleTokenFilterSerializer(item: ShingleTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + maxShingleSize: item["maxShingleSize"], + minShingleSize: item["minShingleSize"], + outputUnigrams: item["outputUnigrams"], + outputUnigramsIfNoShingles: item["outputUnigramsIfNoShingles"], + tokenSeparator: item["tokenSeparator"], + filterToken: item["filterToken"], + }; +} + +export function shingleTokenFilterDeserializer(item: any): ShingleTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + maxShingleSize: item["maxShingleSize"], + minShingleSize: item["minShingleSize"], + outputUnigrams: item["outputUnigrams"], + outputUnigramsIfNoShingles: item["outputUnigramsIfNoShingles"], + tokenSeparator: item["tokenSeparator"], + filterToken: item["filterToken"], + }; +} + +/** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */ +export interface SnowballTokenFilter extends TokenFilter { + /** The language to use. */ + language: SnowballTokenFilterLanguage; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.SnowballTokenFilter"; +} + +export function snowballTokenFilterSerializer(item: SnowballTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + language: item["language"], + }; +} + +export function snowballTokenFilterDeserializer(item: any): SnowballTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + language: item["language"], + }; +} + +/** The language to use for a Snowball token filter. */ +export enum KnownSnowballTokenFilterLanguage { + /** Selects the Lucene Snowball stemming tokenizer for Armenian. */ + Armenian = "armenian", + /** Selects the Lucene Snowball stemming tokenizer for Basque. */ + Basque = "basque", + /** Selects the Lucene Snowball stemming tokenizer for Catalan. */ + Catalan = "catalan", + /** Selects the Lucene Snowball stemming tokenizer for Danish. */ + Danish = "danish", + /** Selects the Lucene Snowball stemming tokenizer for Dutch. 
*/ + Dutch = "dutch", + /** Selects the Lucene Snowball stemming tokenizer for English. */ + English = "english", + /** Selects the Lucene Snowball stemming tokenizer for Finnish. */ + Finnish = "finnish", + /** Selects the Lucene Snowball stemming tokenizer for French. */ + French = "french", + /** Selects the Lucene Snowball stemming tokenizer for German. */ + German = "german", + /** Selects the Lucene Snowball stemming tokenizer that uses the German variant algorithm. */ + German2 = "german2", + /** Selects the Lucene Snowball stemming tokenizer for Hungarian. */ + Hungarian = "hungarian", + /** Selects the Lucene Snowball stemming tokenizer for Italian. */ + Italian = "italian", + /** Selects the Lucene Snowball stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. */ + Kp = "kp", + /** Selects the Lucene Snowball stemming tokenizer for English that uses the Lovins stemming algorithm. */ + Lovins = "lovins", + /** Selects the Lucene Snowball stemming tokenizer for Norwegian. */ + Norwegian = "norwegian", + /** Selects the Lucene Snowball stemming tokenizer for English that uses the Porter stemming algorithm. */ + Porter = "porter", + /** Selects the Lucene Snowball stemming tokenizer for Portuguese. */ + Portuguese = "portuguese", + /** Selects the Lucene Snowball stemming tokenizer for Romanian. */ + Romanian = "romanian", + /** Selects the Lucene Snowball stemming tokenizer for Russian. */ + Russian = "russian", + /** Selects the Lucene Snowball stemming tokenizer for Spanish. */ + Spanish = "spanish", + /** Selects the Lucene Snowball stemming tokenizer for Swedish. */ + Swedish = "swedish", + /** Selects the Lucene Snowball stemming tokenizer for Turkish. */ + Turkish = "turkish", +} + +/** + * The language to use for a Snowball token filter. \ + * {@link KnownSnowballTokenFilterLanguage} can be used interchangeably with SnowballTokenFilterLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **armenian**: Selects the Lucene Snowball stemming tokenizer for Armenian. \ + * **basque**: Selects the Lucene Snowball stemming tokenizer for Basque. \ + * **catalan**: Selects the Lucene Snowball stemming tokenizer for Catalan. \ + * **danish**: Selects the Lucene Snowball stemming tokenizer for Danish. \ + * **dutch**: Selects the Lucene Snowball stemming tokenizer for Dutch. \ + * **english**: Selects the Lucene Snowball stemming tokenizer for English. \ + * **finnish**: Selects the Lucene Snowball stemming tokenizer for Finnish. \ + * **french**: Selects the Lucene Snowball stemming tokenizer for French. \ + * **german**: Selects the Lucene Snowball stemming tokenizer for German. \ + * **german2**: Selects the Lucene Snowball stemming tokenizer that uses the German variant algorithm. \ + * **hungarian**: Selects the Lucene Snowball stemming tokenizer for Hungarian. \ + * **italian**: Selects the Lucene Snowball stemming tokenizer for Italian. \ + * **kp**: Selects the Lucene Snowball stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. \ + * **lovins**: Selects the Lucene Snowball stemming tokenizer for English that uses the Lovins stemming algorithm. \ + * **norwegian**: Selects the Lucene Snowball stemming tokenizer for Norwegian. \ + * **porter**: Selects the Lucene Snowball stemming tokenizer for English that uses the Porter stemming algorithm. \ + * **portuguese**: Selects the Lucene Snowball stemming tokenizer for Portuguese. 
\ + * **romanian**: Selects the Lucene Snowball stemming tokenizer for Romanian. \ + * **russian**: Selects the Lucene Snowball stemming tokenizer for Russian. \ + * **spanish**: Selects the Lucene Snowball stemming tokenizer for Spanish. \ + * **swedish**: Selects the Lucene Snowball stemming tokenizer for Swedish. \ + * **turkish**: Selects the Lucene Snowball stemming tokenizer for Turkish. + */ +export type SnowballTokenFilterLanguage = string; + +/** Language specific stemming filter. This token filter is implemented using Apache Lucene. */ +export interface StemmerTokenFilter extends TokenFilter { + /** The language to use. */ + language: StemmerTokenFilterLanguage; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.StemmerTokenFilter"; +} + +export function stemmerTokenFilterSerializer(item: StemmerTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + language: item["language"], + }; +} + +export function stemmerTokenFilterDeserializer(item: any): StemmerTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + language: item["language"], + }; +} + +/** The language to use for a stemmer token filter. */ +export enum KnownStemmerTokenFilterLanguage { + /** Selects the Lucene stemming tokenizer for Arabic. */ + Arabic = "arabic", + /** Selects the Lucene stemming tokenizer for Armenian. */ + Armenian = "armenian", + /** Selects the Lucene stemming tokenizer for Basque. */ + Basque = "basque", + /** Selects the Lucene stemming tokenizer for Portuguese (Brazil). */ + Brazilian = "brazilian", + /** Selects the Lucene stemming tokenizer for Bulgarian. */ + Bulgarian = "bulgarian", + /** Selects the Lucene stemming tokenizer for Catalan. */ + Catalan = "catalan", + /** Selects the Lucene stemming tokenizer for Czech. */ + Czech = "czech", + /** Selects the Lucene stemming tokenizer for Danish. */ + Danish = "danish", + /** Selects the Lucene stemming tokenizer for Dutch. */ + Dutch = "dutch", + /** Selects the Lucene stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. */ + DutchKp = "dutchKp", + /** Selects the Lucene stemming tokenizer for English. */ + English = "english", + /** Selects the Lucene stemming tokenizer for English that does light stemming. */ + LightEnglish = "lightEnglish", + /** Selects the Lucene stemming tokenizer for English that does minimal stemming. */ + MinimalEnglish = "minimalEnglish", + /** Selects the Lucene stemming tokenizer for English that removes trailing possessives from words. */ + PossessiveEnglish = "possessiveEnglish", + /** Selects the Lucene stemming tokenizer for English that uses the Porter2 stemming algorithm. */ + Porter2 = "porter2", + /** Selects the Lucene stemming tokenizer for English that uses the Lovins stemming algorithm. */ + Lovins = "lovins", + /** Selects the Lucene stemming tokenizer for Finnish. */ + Finnish = "finnish", + /** Selects the Lucene stemming tokenizer for Finnish that does light stemming. */ + LightFinnish = "lightFinnish", + /** Selects the Lucene stemming tokenizer for French. */ + French = "french", + /** Selects the Lucene stemming tokenizer for French that does light stemming. */ + LightFrench = "lightFrench", + /** Selects the Lucene stemming tokenizer for French that does minimal stemming. */ + MinimalFrench = "minimalFrench", + /** Selects the Lucene stemming tokenizer for Galician. 
*/ + Galician = "galician", + /** Selects the Lucene stemming tokenizer for Galician that does minimal stemming. */ + MinimalGalician = "minimalGalician", + /** Selects the Lucene stemming tokenizer for German. */ + German = "german", + /** Selects the Lucene stemming tokenizer that uses the German variant algorithm. */ + German2 = "german2", + /** Selects the Lucene stemming tokenizer for German that does light stemming. */ + LightGerman = "lightGerman", + /** Selects the Lucene stemming tokenizer for German that does minimal stemming. */ + MinimalGerman = "minimalGerman", + /** Selects the Lucene stemming tokenizer for Greek. */ + Greek = "greek", + /** Selects the Lucene stemming tokenizer for Hindi. */ + Hindi = "hindi", + /** Selects the Lucene stemming tokenizer for Hungarian. */ + Hungarian = "hungarian", + /** Selects the Lucene stemming tokenizer for Hungarian that does light stemming. */ + LightHungarian = "lightHungarian", + /** Selects the Lucene stemming tokenizer for Indonesian. */ + Indonesian = "indonesian", + /** Selects the Lucene stemming tokenizer for Irish. */ + Irish = "irish", + /** Selects the Lucene stemming tokenizer for Italian. */ + Italian = "italian", + /** Selects the Lucene stemming tokenizer for Italian that does light stemming. */ + LightItalian = "lightItalian", + /** Selects the Lucene stemming tokenizer for Sorani. */ + Sorani = "sorani", + /** Selects the Lucene stemming tokenizer for Latvian. */ + Latvian = "latvian", + /** Selects the Lucene stemming tokenizer for Norwegian (Bokmål). */ + Norwegian = "norwegian", + /** Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does light stemming. */ + LightNorwegian = "lightNorwegian", + /** Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does minimal stemming. */ + MinimalNorwegian = "minimalNorwegian", + /** Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does light stemming. */ + LightNynorsk = "lightNynorsk", + /** Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does minimal stemming. */ + MinimalNynorsk = "minimalNynorsk", + /** Selects the Lucene stemming tokenizer for Portuguese. */ + Portuguese = "portuguese", + /** Selects the Lucene stemming tokenizer for Portuguese that does light stemming. */ + LightPortuguese = "lightPortuguese", + /** Selects the Lucene stemming tokenizer for Portuguese that does minimal stemming. */ + MinimalPortuguese = "minimalPortuguese", + /** Selects the Lucene stemming tokenizer for Portuguese that uses the RSLP stemming algorithm. */ + PortugueseRslp = "portugueseRslp", + /** Selects the Lucene stemming tokenizer for Romanian. */ + Romanian = "romanian", + /** Selects the Lucene stemming tokenizer for Russian. */ + Russian = "russian", + /** Selects the Lucene stemming tokenizer for Russian that does light stemming. */ + LightRussian = "lightRussian", + /** Selects the Lucene stemming tokenizer for Spanish. */ + Spanish = "spanish", + /** Selects the Lucene stemming tokenizer for Spanish that does light stemming. */ + LightSpanish = "lightSpanish", + /** Selects the Lucene stemming tokenizer for Swedish. */ + Swedish = "swedish", + /** Selects the Lucene stemming tokenizer for Swedish that does light stemming. */ + LightSwedish = "lightSwedish", + /** Selects the Lucene stemming tokenizer for Turkish. */ + Turkish = "turkish", +} + +/** + * The language to use for a stemmer token filter. 
\ + * {@link KnownStemmerTokenFilterLanguage} can be used interchangeably with StemmerTokenFilterLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **arabic**: Selects the Lucene stemming tokenizer for Arabic. \ + * **armenian**: Selects the Lucene stemming tokenizer for Armenian. \ + * **basque**: Selects the Lucene stemming tokenizer for Basque. \ + * **brazilian**: Selects the Lucene stemming tokenizer for Portuguese (Brazil). \ + * **bulgarian**: Selects the Lucene stemming tokenizer for Bulgarian. \ + * **catalan**: Selects the Lucene stemming tokenizer for Catalan. \ + * **czech**: Selects the Lucene stemming tokenizer for Czech. \ + * **danish**: Selects the Lucene stemming tokenizer for Danish. \ + * **dutch**: Selects the Lucene stemming tokenizer for Dutch. \ + * **dutchKp**: Selects the Lucene stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. \ + * **english**: Selects the Lucene stemming tokenizer for English. \ + * **lightEnglish**: Selects the Lucene stemming tokenizer for English that does light stemming. \ + * **minimalEnglish**: Selects the Lucene stemming tokenizer for English that does minimal stemming. \ + * **possessiveEnglish**: Selects the Lucene stemming tokenizer for English that removes trailing possessives from words. \ + * **porter2**: Selects the Lucene stemming tokenizer for English that uses the Porter2 stemming algorithm. \ + * **lovins**: Selects the Lucene stemming tokenizer for English that uses the Lovins stemming algorithm. \ + * **finnish**: Selects the Lucene stemming tokenizer for Finnish. \ + * **lightFinnish**: Selects the Lucene stemming tokenizer for Finnish that does light stemming. \ + * **french**: Selects the Lucene stemming tokenizer for French. \ + * **lightFrench**: Selects the Lucene stemming tokenizer for French that does light stemming. \ + * **minimalFrench**: Selects the Lucene stemming tokenizer for French that does minimal stemming. \ + * **galician**: Selects the Lucene stemming tokenizer for Galician. \ + * **minimalGalician**: Selects the Lucene stemming tokenizer for Galician that does minimal stemming. \ + * **german**: Selects the Lucene stemming tokenizer for German. \ + * **german2**: Selects the Lucene stemming tokenizer that uses the German variant algorithm. \ + * **lightGerman**: Selects the Lucene stemming tokenizer for German that does light stemming. \ + * **minimalGerman**: Selects the Lucene stemming tokenizer for German that does minimal stemming. \ + * **greek**: Selects the Lucene stemming tokenizer for Greek. \ + * **hindi**: Selects the Lucene stemming tokenizer for Hindi. \ + * **hungarian**: Selects the Lucene stemming tokenizer for Hungarian. \ + * **lightHungarian**: Selects the Lucene stemming tokenizer for Hungarian that does light stemming. \ + * **indonesian**: Selects the Lucene stemming tokenizer for Indonesian. \ + * **irish**: Selects the Lucene stemming tokenizer for Irish. \ + * **italian**: Selects the Lucene stemming tokenizer for Italian. \ + * **lightItalian**: Selects the Lucene stemming tokenizer for Italian that does light stemming. \ + * **sorani**: Selects the Lucene stemming tokenizer for Sorani. \ + * **latvian**: Selects the Lucene stemming tokenizer for Latvian. \ + * **norwegian**: Selects the Lucene stemming tokenizer for Norwegian (Bokmål). \ + * **lightNorwegian**: Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does light stemming. 
\ + * **minimalNorwegian**: Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does minimal stemming. \ + * **lightNynorsk**: Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does light stemming. \ + * **minimalNynorsk**: Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does minimal stemming. \ + * **portuguese**: Selects the Lucene stemming tokenizer for Portuguese. \ + * **lightPortuguese**: Selects the Lucene stemming tokenizer for Portuguese that does light stemming. \ + * **minimalPortuguese**: Selects the Lucene stemming tokenizer for Portuguese that does minimal stemming. \ + * **portugueseRslp**: Selects the Lucene stemming tokenizer for Portuguese that uses the RSLP stemming algorithm. \ + * **romanian**: Selects the Lucene stemming tokenizer for Romanian. \ + * **russian**: Selects the Lucene stemming tokenizer for Russian. \ + * **lightRussian**: Selects the Lucene stemming tokenizer for Russian that does light stemming. \ + * **spanish**: Selects the Lucene stemming tokenizer for Spanish. \ + * **lightSpanish**: Selects the Lucene stemming tokenizer for Spanish that does light stemming. \ + * **swedish**: Selects the Lucene stemming tokenizer for Swedish. \ + * **lightSwedish**: Selects the Lucene stemming tokenizer for Swedish that does light stemming. \ + * **turkish**: Selects the Lucene stemming tokenizer for Turkish. + */ +export type StemmerTokenFilterLanguage = string; + +/** Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. */ +export interface StemmerOverrideTokenFilter extends TokenFilter { + /** A list of stemming rules in the following format: "word => stem", for example: "ran => run". */ + rules: string[]; + /** A URI fragment specifying the type of token filter. */ + odatatype: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter"; +} + +export function stemmerOverrideTokenFilterSerializer(item: StemmerOverrideTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + rules: item["rules"].map((p: any) => { + return p; + }), + }; +} + +export function stemmerOverrideTokenFilterDeserializer(item: any): StemmerOverrideTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + rules: item["rules"].map((p: any) => { + return p; + }), + }; +} + +/** Removes stop words from a token stream. This token filter is implemented using Apache Lucene. */ +export interface StopwordsTokenFilter extends TokenFilter { + /** The list of stopwords. This property and the stopwords list property cannot both be set. */ + stopwords?: string[]; + /** A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. */ + stopwordsList?: StopwordsList; + /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */ + ignoreCase?: boolean; + /** A value indicating whether to ignore the last search term if it's a stop word. Default is true. */ + removeTrailingStopWords?: boolean; + /** A URI fragment specifying the type of token filter. 
*/ + odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter"; +} + +export function stopwordsTokenFilterSerializer(item: StopwordsTokenFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + stopwordsList: item["stopwordsList"], + ignoreCase: item["ignoreCase"], + removeTrailing: item["removeTrailingStopWords"], + }; +} + +export function stopwordsTokenFilterDeserializer(item: any): StopwordsTokenFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + stopwords: !item["stopwords"] + ? item["stopwords"] + : item["stopwords"].map((p: any) => { + return p; + }), + stopwordsList: item["stopwordsList"], + ignoreCase: item["ignoreCase"], + removeTrailingStopWords: item["removeTrailing"], + }; +} + +/** Identifies a predefined list of language-specific stopwords. */ +export enum KnownStopwordsList { + /** Selects the stopword list for Arabic. */ + Arabic = "arabic", + /** Selects the stopword list for Armenian. */ + Armenian = "armenian", + /** Selects the stopword list for Basque. */ + Basque = "basque", + /** Selects the stopword list for Portuguese (Brazil). */ + Brazilian = "brazilian", + /** Selects the stopword list for Bulgarian. */ + Bulgarian = "bulgarian", + /** Selects the stopword list for Catalan. */ + Catalan = "catalan", + /** Selects the stopword list for Czech. */ + Czech = "czech", + /** Selects the stopword list for Danish. */ + Danish = "danish", + /** Selects the stopword list for Dutch. */ + Dutch = "dutch", + /** Selects the stopword list for English. */ + English = "english", + /** Selects the stopword list for Finnish. */ + Finnish = "finnish", + /** Selects the stopword list for French. */ + French = "french", + /** Selects the stopword list for Galician. */ + Galician = "galician", + /** Selects the stopword list for German. */ + German = "german", + /** Selects the stopword list for Greek. */ + Greek = "greek", + /** Selects the stopword list for Hindi. */ + Hindi = "hindi", + /** Selects the stopword list for Hungarian. */ + Hungarian = "hungarian", + /** Selects the stopword list for Indonesian. */ + Indonesian = "indonesian", + /** Selects the stopword list for Irish. */ + Irish = "irish", + /** Selects the stopword list for Italian. */ + Italian = "italian", + /** Selects the stopword list for Latvian. */ + Latvian = "latvian", + /** Selects the stopword list for Norwegian. */ + Norwegian = "norwegian", + /** Selects the stopword list for Persian. */ + Persian = "persian", + /** Selects the stopword list for Portuguese. */ + Portuguese = "portuguese", + /** Selects the stopword list for Romanian. */ + Romanian = "romanian", + /** Selects the stopword list for Russian. */ + Russian = "russian", + /** Selects the stopword list for Sorani. */ + Sorani = "sorani", + /** Selects the stopword list for Spanish. */ + Spanish = "spanish", + /** Selects the stopword list for Swedish. */ + Swedish = "swedish", + /** Selects the stopword list for Thai. */ + Thai = "thai", + /** Selects the stopword list for Turkish. */ + Turkish = "turkish", +} + +/** + * Identifies a predefined list of language-specific stopwords. \ + * {@link KnownStopwordsList} can be used interchangeably with StopwordsList, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **arabic**: Selects the stopword list for Arabic. 
\
+ * **armenian**: Selects the stopword list for Armenian. \
+ * **basque**: Selects the stopword list for Basque. \
+ * **brazilian**: Selects the stopword list for Portuguese (Brazil). \
+ * **bulgarian**: Selects the stopword list for Bulgarian. \
+ * **catalan**: Selects the stopword list for Catalan. \
+ * **czech**: Selects the stopword list for Czech. \
+ * **danish**: Selects the stopword list for Danish. \
+ * **dutch**: Selects the stopword list for Dutch. \
+ * **english**: Selects the stopword list for English. \
+ * **finnish**: Selects the stopword list for Finnish. \
+ * **french**: Selects the stopword list for French. \
+ * **galician**: Selects the stopword list for Galician. \
+ * **german**: Selects the stopword list for German. \
+ * **greek**: Selects the stopword list for Greek. \
+ * **hindi**: Selects the stopword list for Hindi. \
+ * **hungarian**: Selects the stopword list for Hungarian. \
+ * **indonesian**: Selects the stopword list for Indonesian. \
+ * **irish**: Selects the stopword list for Irish. \
+ * **italian**: Selects the stopword list for Italian. \
+ * **latvian**: Selects the stopword list for Latvian. \
+ * **norwegian**: Selects the stopword list for Norwegian. \
+ * **persian**: Selects the stopword list for Persian. \
+ * **portuguese**: Selects the stopword list for Portuguese. \
+ * **romanian**: Selects the stopword list for Romanian. \
+ * **russian**: Selects the stopword list for Russian. \
+ * **sorani**: Selects the stopword list for Sorani. \
+ * **spanish**: Selects the stopword list for Spanish. \
+ * **swedish**: Selects the stopword list for Swedish. \
+ * **thai**: Selects the stopword list for Thai. \
+ * **turkish**: Selects the stopword list for Turkish.
+ */
+export type StopwordsList = string;
+
+/** Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene. */
+export interface SynonymTokenFilter extends TokenFilter {
+  /** A list of synonyms in one of the following two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of the => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - a comma-separated list of equivalent words. Set the expand option to change how this list is interpreted. */
+  synonyms: string[];
+  /** A value indicating whether to case-fold input for matching. Default is false. */
+  ignoreCase?: boolean;
+  /** A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. If true, the list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, it is equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. */
+  expand?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter";
+}
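As a quick illustration of the shape above, a synonym map of equivalent terms could be declared like this (the filter name and word list are hypothetical):

```ts
// With expand left at its default (true), every listed term maps to every other.
const synonymFilter: SynonymTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter",
  name: "my-synonym-filter",
  synonyms: ["incredible, unbelievable, fabulous, amazing"],
  ignoreCase: true,
};
```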
+
+export function synonymTokenFilterSerializer(item: SynonymTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    synonyms: item["synonyms"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+    expand: item["expand"],
+  };
+}
+
+export function synonymTokenFilterDeserializer(item: any): SynonymTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    synonyms: item["synonyms"].map((p: any) => {
+      return p;
+    }),
+    ignoreCase: item["ignoreCase"],
+    expand: item["expand"],
+  };
+}
+
+/** Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. */
+export interface TruncateTokenFilter extends TokenFilter {
+  /** The length at which terms will be truncated. Default and maximum is 300. */
+  length?: number;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.TruncateTokenFilter";
+}
+
+export function truncateTokenFilterSerializer(item: TruncateTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    length: item["length"],
+  };
+}
+
+export function truncateTokenFilterDeserializer(item: any): TruncateTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    length: item["length"],
+  };
+}
+
+/** Filters out tokens with the same text as the previous token. This token filter is implemented using Apache Lucene. */
+export interface UniqueTokenFilter extends TokenFilter {
+  /** A value indicating whether to remove duplicates only at the same position. Default is false. */
+  onlyOnSamePosition?: boolean;
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.UniqueTokenFilter";
+}
+
+export function uniqueTokenFilterSerializer(item: UniqueTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    onlyOnSamePosition: item["onlyOnSamePosition"],
+  };
+}
+
+export function uniqueTokenFilterDeserializer(item: any): UniqueTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    onlyOnSamePosition: item["onlyOnSamePosition"],
+  };
+}
+
+/** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */
+export interface WordDelimiterTokenFilter extends TokenFilter {
+  /** A value indicating whether to generate part words. If set, causes parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. */
+  generateWordParts?: boolean;
+  /** A value indicating whether to generate number subwords. Default is true. */
+  generateNumberParts?: boolean;
+  /** A value indicating whether maximum runs of word parts will be catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false. */
+  catenateWords?: boolean;
+  /** A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. */
+  catenateNumbers?: boolean;
+  /** A value indicating whether all subword parts will be catenated. For example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. */
+  catenateAll?: boolean;
+  /** A value indicating whether to split words on caseChange. For example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. */
+  splitOnCaseChange?: boolean;
+  /** A value indicating whether original words will be preserved and added to the subword list. Default is false. */
+  preserveOriginal?: boolean;
+  /** A value indicating whether to split on numbers. For example, if this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. */
+  splitOnNumerics?: boolean;
+  /** A value indicating whether to remove trailing "'s" for each subword. Default is true. */
+  stemEnglishPossessive?: boolean;
+  /** A list of tokens to protect from being delimited. */
+  protectedWords?: string[];
+  /** A URI fragment specifying the type of token filter. */
+  odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter";
+}
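For reference, a sketch of a word-delimiter filter that splits on case changes while protecting a brand name from being delimited (all values illustrative):

```ts
const wordDelimiter: WordDelimiterTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter",
  name: "my-word-delimiter",
  splitOnCaseChange: true, // "AzureSearch" -> "Azure" "Search"
  catenateWords: false,
  protectedWords: ["iPhone"], // never split, despite the internal case change
};
```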
+
+export function wordDelimiterTokenFilterSerializer(item: WordDelimiterTokenFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    generateWordParts: item["generateWordParts"],
+    generateNumberParts: item["generateNumberParts"],
+    catenateWords: item["catenateWords"],
+    catenateNumbers: item["catenateNumbers"],
+    catenateAll: item["catenateAll"],
+    splitOnCaseChange: item["splitOnCaseChange"],
+    preserveOriginal: item["preserveOriginal"],
+    splitOnNumerics: item["splitOnNumerics"],
+    stemEnglishPossessive: item["stemEnglishPossessive"],
+    protectedWords: !item["protectedWords"]
+      ? item["protectedWords"]
+      : item["protectedWords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function wordDelimiterTokenFilterDeserializer(item: any): WordDelimiterTokenFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    generateWordParts: item["generateWordParts"],
+    generateNumberParts: item["generateNumberParts"],
+    catenateWords: item["catenateWords"],
+    catenateNumbers: item["catenateNumbers"],
+    catenateAll: item["catenateAll"],
+    splitOnCaseChange: item["splitOnCaseChange"],
+    preserveOriginal: item["preserveOriginal"],
+    splitOnNumerics: item["splitOnNumerics"],
+    stemEnglishPossessive: item["stemEnglishPossessive"],
+    protectedWords: !item["protectedWords"]
+      ? item["protectedWords"]
+      : item["protectedWords"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function charFilterUnionArraySerializer(result: Array<CharFilterUnion>): any[] {
+  return result.map((item) => {
+    return charFilterUnionSerializer(item);
+  });
+}
+
+export function charFilterUnionArrayDeserializer(result: Array<CharFilterUnion>): any[] {
+  return result.map((item) => {
+    return charFilterUnionDeserializer(item);
+  });
+}
+
+/** Base type for character filters. */
+export interface CharFilter {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.MappingCharFilter, #Microsoft.Azure.Search.PatternReplaceCharFilter */
+  odatatype: string;
+  /** The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters.
*/ + name: string; +} + +export function charFilterSerializer(item: CharFilter): any { + return { "@odata.type": item["odatatype"], name: item["name"] }; +} + +export function charFilterDeserializer(item: any): CharFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + }; +} + +/** Alias for CharFilterUnion */ +export type CharFilterUnion = MappingCharFilter | PatternReplaceCharFilter | CharFilter; + +export function charFilterUnionSerializer(item: CharFilterUnion): any { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.MappingCharFilter": + return mappingCharFilterSerializer(item as MappingCharFilter); + + case "#Microsoft.Azure.Search.PatternReplaceCharFilter": + return patternReplaceCharFilterSerializer(item as PatternReplaceCharFilter); + + default: + return charFilterSerializer(item); + } +} + +export function charFilterUnionDeserializer(item: any): CharFilterUnion { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.MappingCharFilter": + return mappingCharFilterDeserializer(item as MappingCharFilter); + + case "#Microsoft.Azure.Search.PatternReplaceCharFilter": + return patternReplaceCharFilterDeserializer(item as PatternReplaceCharFilter); + + default: + return charFilterDeserializer(item); + } +} + +/** A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. */ +export interface MappingCharFilter extends CharFilter { + /** A list of mappings of the following format: "a=>b" (all occurrences of the character "a" will be replaced with character "b"). */ + mappings: string[]; + /** A URI fragment specifying the type of char filter. */ + odatatype: "#Microsoft.Azure.Search.MappingCharFilter"; +} + +export function mappingCharFilterSerializer(item: MappingCharFilter): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + mappings: item["mappings"].map((p: any) => { + return p; + }), + }; +} + +export function mappingCharFilterDeserializer(item: any): MappingCharFilter { + return { + odatatype: item["@odata.type"], + name: item["name"], + mappings: item["mappings"].map((p: any) => { + return p; + }), + }; +} + +/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene. */ +export interface PatternReplaceCharFilter extends CharFilter { + /** A regular expression pattern. */ + pattern: string; + /** The replacement text. */ + replacement: string; + /** A URI fragment specifying the type of char filter. 
*/
+  odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter";
+}
+
+export function patternReplaceCharFilterSerializer(item: PatternReplaceCharFilter): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    pattern: item["pattern"],
+    replacement: item["replacement"],
+  };
+}
+
+export function patternReplaceCharFilterDeserializer(item: any): PatternReplaceCharFilter {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    pattern: item["pattern"],
+    replacement: item["replacement"],
+  };
+}
+
+export function lexicalNormalizerUnionArraySerializer(
+  result: Array<LexicalNormalizerUnion>,
+): any[] {
+  return result.map((item) => {
+    return lexicalNormalizerUnionSerializer(item);
+  });
+}
+
+export function lexicalNormalizerUnionArrayDeserializer(
+  result: Array<LexicalNormalizerUnion>,
+): any[] {
+  return result.map((item) => {
+    return lexicalNormalizerUnionDeserializer(item);
+  });
+}
+
+/** Base type for normalizers. */
+export interface LexicalNormalizer {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.CustomNormalizer */
+  odatatype: string;
+  /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+
+export function lexicalNormalizerSerializer(item: LexicalNormalizer): any {
+  return { "@odata.type": item["odatatype"], name: item["name"] };
+}
+
+export function lexicalNormalizerDeserializer(item: any): LexicalNormalizer {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+  };
+}
+
+/** Alias for LexicalNormalizerUnion */
+export type LexicalNormalizerUnion = CustomNormalizer | LexicalNormalizer;
+
+export function lexicalNormalizerUnionSerializer(item: LexicalNormalizerUnion): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.CustomNormalizer":
+      return customNormalizerSerializer(item as CustomNormalizer);
+
+    default:
+      return lexicalNormalizerSerializer(item);
+  }
+}
+
+export function lexicalNormalizerUnionDeserializer(item: any): LexicalNormalizerUnion {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.CustomNormalizer":
+      return customNormalizerDeserializer(item as CustomNormalizer);
+
+    default:
+      return lexicalNormalizerDeserializer(item);
+  }
+}
+
+/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of one or more filters, which modify the token that is stored. */
+export interface CustomNormalizer extends LexicalNormalizer {
+  /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
+  tokenFilters?: TokenFilterName[];
+  /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
+  charFilters?: CharFilterName[];
+  /** A URI fragment specifying the type of normalizer. */
+  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
+}
+
+export function customNormalizerSerializer(item: CustomNormalizer): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    tokenFilters: !item["tokenFilters"]
+      ? item["tokenFilters"]
+      : item["tokenFilters"].map((p: any) => {
+          return p;
+        }),
+    charFilters: !item["charFilters"]
+      ? item["charFilters"]
+      : item["charFilters"].map((p: any) => {
+          return p;
+        }),
+  };
+}
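A minimal sketch of a custom normalizer assembled from predefined filter names; the normalizer name is hypothetical, and `lowercase`/`asciifolding` are assumed to be among the known TokenFilterName values:

```ts
const normalizer: CustomNormalizer = {
  odatatype: "#Microsoft.Azure.Search.CustomNormalizer",
  name: "my-normalizer",
  tokenFilters: ["lowercase", "asciifolding"], // run in the order listed
};
```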
item["tokenFilters"] + : item["tokenFilters"].map((p: any) => { + return p; + }), + charFilters: !item["charFilters"] + ? item["charFilters"] + : item["charFilters"].map((p: any) => { + return p; + }), + }; +} + +export function customNormalizerDeserializer(item: any): CustomNormalizer { + return { + odatatype: item["@odata.type"], + name: item["name"], + tokenFilters: !item["tokenFilters"] + ? item["tokenFilters"] + : item["tokenFilters"].map((p: any) => { + return p; + }), + charFilters: !item["charFilters"] + ? item["charFilters"] + : item["charFilters"].map((p: any) => { + return p; + }), + }; +} + +/** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */ +export interface SimilarityAlgorithm { + /** The discriminator for derived types. */ + /** The discriminator possible values: #Microsoft.Azure.Search.ClassicSimilarity, #Microsoft.Azure.Search.BM25Similarity */ + odatatype: string; +} + +export function similarityAlgorithmSerializer(item: SimilarityAlgorithm): any { + return { "@odata.type": item["odatatype"] }; +} + +export function similarityAlgorithmDeserializer(item: any): SimilarityAlgorithm { + return { + odatatype: item["@odata.type"], + }; +} + +/** Alias for SimilarityAlgorithmUnion */ +export type SimilarityAlgorithmUnion = ClassicSimilarity | BM25Similarity | SimilarityAlgorithm; + +export function similarityAlgorithmUnionSerializer(item: SimilarityAlgorithmUnion): any { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.ClassicSimilarity": + return classicSimilaritySerializer(item as ClassicSimilarity); + + case "#Microsoft.Azure.Search.BM25Similarity": + return bm25SimilaritySerializer(item as BM25Similarity); + + default: + return similarityAlgorithmSerializer(item); + } +} + +export function similarityAlgorithmUnionDeserializer(item: any): SimilarityAlgorithmUnion { + switch (item.odatatype) { + case "#Microsoft.Azure.Search.ClassicSimilarity": + return classicSimilarityDeserializer(item as ClassicSimilarity); + + case "#Microsoft.Azure.Search.BM25Similarity": + return bm25SimilarityDeserializer(item as BM25Similarity); + + default: + return similarityAlgorithmDeserializer(item); + } +} + +/** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. */ +export interface ClassicSimilarity extends SimilarityAlgorithm { + /** The discriminator for derived types. */ + odatatype: "#Microsoft.Azure.Search.ClassicSimilarity"; +} + +export function classicSimilaritySerializer(item: ClassicSimilarity): any { + return { "@odata.type": item["odatatype"] }; +} + +export function classicSimilarityDeserializer(item: any): ClassicSimilarity { + return { + odatatype: item["@odata.type"], + }; +} + +/** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). */ +export interface BM25Similarity extends SimilarityAlgorithm { + /** This property controls the scaling function between the term frequency of each matching terms and the final relevance score of a document-query pair. 
+
+export function bm25SimilaritySerializer(item: BM25Similarity): any {
+  return { "@odata.type": item["odatatype"], k1: item["k1"], b: item["b"] };
+}
+
+export function bm25SimilarityDeserializer(item: any): BM25Similarity {
+  return {
+    odatatype: item["@odata.type"],
+    k1: item["k1"],
+    b: item["b"],
+  };
+}
+
+/** Defines parameters for a search index that influence semantic capabilities. */
+export interface SemanticSearch {
+  /** Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time. */
+  defaultConfigurationName?: string;
+  /** The semantic configurations for the index. */
+  configurations?: SemanticConfiguration[];
+}
+
+export function semanticSearchSerializer(item: SemanticSearch): any {
+  return {
+    defaultConfiguration: item["defaultConfigurationName"],
+    configurations: !item["configurations"]
+      ? item["configurations"]
+      : semanticConfigurationArraySerializer(item["configurations"]),
+  };
+}
+
+export function semanticSearchDeserializer(item: any): SemanticSearch {
+  return {
+    defaultConfigurationName: item["defaultConfiguration"],
+    configurations: !item["configurations"]
+      ? item["configurations"]
+      : semanticConfigurationArrayDeserializer(item["configurations"]),
+  };
+}
+
+export function semanticConfigurationArraySerializer(result: Array<SemanticConfiguration>): any[] {
+  return result.map((item) => {
+    return semanticConfigurationSerializer(item);
+  });
+}
+
+export function semanticConfigurationArrayDeserializer(
+  result: Array<SemanticConfiguration>,
+): any[] {
+  return result.map((item) => {
+    return semanticConfigurationDeserializer(item);
+  });
+}
+
+/** Defines a specific configuration to be used in the context of semantic capabilities. */
+export interface SemanticConfiguration {
+  /** The name of the semantic configuration. */
+  name: string;
+  /** Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) needs to be set. */
+  prioritizedFields: SemanticPrioritizedFields;
+  /** Specifies the score type to be used for the sort order of the search results. */
+  rankingOrder?: RankingOrder;
+  /** Determines which semantic or query rewrite models to use during model flighting/upgrades. */
+  flightingOptIn?: boolean;
+}
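Putting the pieces together, a semantic configuration might look like this sketch (the configuration and field names are illustrative):

```ts
const semanticConfig: SemanticConfiguration = {
  name: "my-semantic-config",
  prioritizedFields: {
    titleField: { name: "hotelName" },
    contentFields: [{ name: "description" }], // ordered by priority
  },
};
```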
+
+export function semanticConfigurationSerializer(item: SemanticConfiguration): any {
+  return {
+    name: item["name"],
+    prioritizedFields: semanticPrioritizedFieldsSerializer(item["prioritizedFields"]),
+    rankingOrder: item["rankingOrder"],
+    flightingOptIn: item["flightingOptIn"],
+  };
+}
+
+export function semanticConfigurationDeserializer(item: any): SemanticConfiguration {
+  return {
+    name: item["name"],
+    prioritizedFields: semanticPrioritizedFieldsDeserializer(item["prioritizedFields"]),
+    rankingOrder: item["rankingOrder"],
+    flightingOptIn: item["flightingOptIn"],
+  };
+}
+
+/** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
+export interface SemanticPrioritizedFields {
+  /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
+  titleField?: SemanticField;
+  /** Defines the content fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain text in natural language form. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
+  contentFields?: SemanticField[];
+  /** Defines the keyword fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain a list of keywords. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
+  keywordsFields?: SemanticField[];
+}
+
+export function semanticPrioritizedFieldsSerializer(item: SemanticPrioritizedFields): any {
+  return {
+    titleField: !item["titleField"]
+      ? item["titleField"]
+      : semanticFieldSerializer(item["titleField"]),
+    prioritizedContentFields: !item["contentFields"]
+      ? item["contentFields"]
+      : semanticFieldArraySerializer(item["contentFields"]),
+    prioritizedKeywordsFields: !item["keywordsFields"]
+      ? item["keywordsFields"]
+      : semanticFieldArraySerializer(item["keywordsFields"]),
+  };
+}
+
+export function semanticPrioritizedFieldsDeserializer(item: any): SemanticPrioritizedFields {
+  return {
+    titleField: !item["titleField"]
+      ? item["titleField"]
+      : semanticFieldDeserializer(item["titleField"]),
+    contentFields: !item["prioritizedContentFields"]
+      ? item["prioritizedContentFields"]
+      : semanticFieldArrayDeserializer(item["prioritizedContentFields"]),
+    keywordsFields: !item["prioritizedKeywordsFields"]
+      ? item["prioritizedKeywordsFields"]
+      : semanticFieldArrayDeserializer(item["prioritizedKeywordsFields"]),
+  };
+}
+
+/** A field that is used as part of the semantic configuration. */
+export interface SemanticField {
+  /** The name of the field. */
+  name: string;
+}
+
+export function semanticFieldSerializer(item: SemanticField): any {
+  return { fieldName: item["name"] };
+}
+
+export function semanticFieldDeserializer(item: any): SemanticField {
+  return {
+    name: item["fieldName"],
+  };
+}
+
+export function semanticFieldArraySerializer(result: Array<SemanticField>): any[] {
+  return result.map((item) => {
+    return semanticFieldSerializer(item);
+  });
+}
+
+export function semanticFieldArrayDeserializer(result: Array<SemanticField>): any[] {
+  return result.map((item) => {
+    return semanticFieldDeserializer(item);
+  });
+}
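Note that the serializers above also rename properties between the client and wire shapes (`name` becomes `fieldName`, `contentFields` becomes `prioritizedContentFields`, and so on). A sketch of the resulting wire payload, reusing the illustrative fields from earlier:

```ts
const wire = semanticPrioritizedFieldsSerializer({
  titleField: { name: "hotelName" },
  contentFields: [{ name: "description" }],
});
// wire is roughly:
// { titleField: { fieldName: "hotelName" },
//   prioritizedContentFields: [{ fieldName: "description" }] }
```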
+/** Represents the score to use for the sort order of documents. */
+export enum KnownRankingOrder {
+  /** Sets sort order as BoostedRerankerScore */
+  BoostedRerankerScore = "BoostedRerankerScore",
+  /** Sets sort order as ReRankerScore */
+  RerankerScore = "RerankerScore",
+}
+
+/**
+ * Represents the score to use for the sort order of documents. \
+ * {@link KnownRankingOrder} can be used interchangeably with RankingOrder,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **BoostedRerankerScore**: Sets sort order as BoostedRerankerScore \
+ * **RerankerScore**: Sets sort order as ReRankerScore
+ */
+export type RankingOrder = string;
+
+/** Contains configuration options related to vector search. */
+export interface VectorSearch {
+  /** Defines combinations of configurations to use with vector search. */
+  profiles?: VectorSearchProfile[];
+  /** Contains configuration options specific to the algorithm used during indexing or querying. */
+  algorithms?: VectorSearchAlgorithmConfigurationUnion[];
+  /** Contains configuration options on how to vectorize text vector queries. */
+  vectorizers?: VectorSearchVectorizerUnion[];
+  /** Contains configuration options specific to the compression method used during indexing or querying. */
+  compressions?: VectorSearchCompressionUnion[];
+}
+
+export function vectorSearchSerializer(item: VectorSearch): any {
+  return {
+    profiles: !item["profiles"]
+      ? item["profiles"]
+      : vectorSearchProfileArraySerializer(item["profiles"]),
+    algorithms: !item["algorithms"]
+      ? item["algorithms"]
+      : vectorSearchAlgorithmConfigurationUnionArraySerializer(item["algorithms"]),
+    vectorizers: !item["vectorizers"]
+      ? item["vectorizers"]
+      : vectorSearchVectorizerUnionArraySerializer(item["vectorizers"]),
+    compressions: !item["compressions"]
+      ? item["compressions"]
+      : vectorSearchCompressionUnionArraySerializer(item["compressions"]),
+  };
+}
+
+export function vectorSearchDeserializer(item: any): VectorSearch {
+  return {
+    profiles: !item["profiles"]
+      ? item["profiles"]
+      : vectorSearchProfileArrayDeserializer(item["profiles"]),
+    algorithms: !item["algorithms"]
+      ? item["algorithms"]
+      : vectorSearchAlgorithmConfigurationUnionArrayDeserializer(item["algorithms"]),
+    vectorizers: !item["vectorizers"]
+      ? item["vectorizers"]
+      : vectorSearchVectorizerUnionArrayDeserializer(item["vectorizers"]),
+    compressions: !item["compressions"]
+      ? item["compressions"]
+      : vectorSearchCompressionUnionArrayDeserializer(item["compressions"]),
+  };
+}
+
+export function vectorSearchProfileArraySerializer(result: Array<VectorSearchProfile>): any[] {
+  return result.map((item) => {
+    return vectorSearchProfileSerializer(item);
+  });
+}
+
+export function vectorSearchProfileArrayDeserializer(result: Array<VectorSearchProfile>): any[] {
+  return result.map((item) => {
+    return vectorSearchProfileDeserializer(item);
+  });
+}
+
+/** Defines a combination of configurations to use with vector search. */
+export interface VectorSearchProfile {
+  /** The name to associate with this particular vector search profile. */
+  name: string;
+  /** The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. */
+  algorithmConfigurationName: string;
+  /** The name of the vectorization being configured for use with vector search. */
+  vectorizerName?: string;
+  /** The name of the compression method configuration that specifies the compression method and optional parameters. */
+  compressionName?: string;
+}
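Profiles tie the other vector search pieces together by name rather than by reference, so the strings must match; a sketch of that wiring with hypothetical names:

```ts
const vectorSearch: VectorSearch = {
  algorithms: [{ name: "my-hnsw", kind: "hnsw" }],
  profiles: [
    {
      name: "my-profile",
      algorithmConfigurationName: "my-hnsw", // must match an algorithm name above
    },
  ],
};
```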
+
+export function vectorSearchProfileSerializer(item: VectorSearchProfile): any {
+  return {
+    name: item["name"],
+    algorithm: item["algorithmConfigurationName"],
+    vectorizer: item["vectorizerName"],
+    compression: item["compressionName"],
+  };
+}
+
+export function vectorSearchProfileDeserializer(item: any): VectorSearchProfile {
+  return {
+    name: item["name"],
+    algorithmConfigurationName: item["algorithm"],
+    vectorizerName: item["vectorizer"],
+    compressionName: item["compression"],
+  };
+}
+
+export function vectorSearchAlgorithmConfigurationUnionArraySerializer(
+  result: Array<VectorSearchAlgorithmConfigurationUnion>,
+): any[] {
+  return result.map((item) => {
+    return vectorSearchAlgorithmConfigurationUnionSerializer(item);
+  });
+}
+
+export function vectorSearchAlgorithmConfigurationUnionArrayDeserializer(
+  result: Array<VectorSearchAlgorithmConfigurationUnion>,
+): any[] {
+  return result.map((item) => {
+    return vectorSearchAlgorithmConfigurationUnionDeserializer(item);
+  });
+}
+
+/** Contains configuration options specific to the algorithm used during indexing or querying. */
+export interface VectorSearchAlgorithmConfiguration {
+  /** The name to associate with this particular configuration. */
+  name: string;
+  /** Type of VectorSearchAlgorithmConfiguration. */
+  /** The discriminator possible values: hnsw, exhaustiveKnn */
+  kind: VectorSearchAlgorithmKind;
+}
+
+export function vectorSearchAlgorithmConfigurationSerializer(
+  item: VectorSearchAlgorithmConfiguration,
+): any {
+  return { name: item["name"], kind: item["kind"] };
+}
+
+export function vectorSearchAlgorithmConfigurationDeserializer(
+  item: any,
+): VectorSearchAlgorithmConfiguration {
+  return {
+    name: item["name"],
+    kind: item["kind"],
+  };
+}
+
+/** Alias for VectorSearchAlgorithmConfigurationUnion */
+export type VectorSearchAlgorithmConfigurationUnion =
+  | HnswAlgorithmConfiguration
+  | ExhaustiveKnnAlgorithmConfiguration
+  | VectorSearchAlgorithmConfiguration;
+
+export function vectorSearchAlgorithmConfigurationUnionSerializer(
+  item: VectorSearchAlgorithmConfigurationUnion,
+): any {
+  switch (item.kind) {
+    case "hnsw":
+      return hnswAlgorithmConfigurationSerializer(item as HnswAlgorithmConfiguration);
+
+    case "exhaustiveKnn":
+      return exhaustiveKnnAlgorithmConfigurationSerializer(
+        item as ExhaustiveKnnAlgorithmConfiguration,
+      );
+
+    default:
+      return vectorSearchAlgorithmConfigurationSerializer(item);
+  }
+}
+
+export function vectorSearchAlgorithmConfigurationUnionDeserializer(
+  item: any,
+): VectorSearchAlgorithmConfigurationUnion {
+  switch (item.kind) {
+    case "hnsw":
+      return hnswAlgorithmConfigurationDeserializer(item as HnswAlgorithmConfiguration);
+
+    case "exhaustiveKnn":
+      return exhaustiveKnnAlgorithmConfigurationDeserializer(
+        item as ExhaustiveKnnAlgorithmConfiguration,
+      );
+
+    default:
+      return vectorSearchAlgorithmConfigurationDeserializer(item);
+  }
+}
+
+/** The algorithm used for indexing and querying. */
+export enum KnownVectorSearchAlgorithmKind {
+  /** HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */
+  Hnsw = "hnsw",
+  /** Exhaustive KNN algorithm which will perform brute-force search. */
+  ExhaustiveKnn = "exhaustiveKnn",
+}
+
+/**
+ * The algorithm used for indexing and querying. \
+ * {@link KnownVectorSearchAlgorithmKind} can be used interchangeably with VectorSearchAlgorithmKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service + * **hnsw**: HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. \ + * **exhaustiveKnn**: Exhaustive KNN algorithm which will perform brute-force search. + */ +export type VectorSearchAlgorithmKind = string; + +/** Contains configuration options specific to the HNSW approximate nearest neighbors algorithm used during indexing and querying. The HNSW algorithm offers a tunable trade-off between search speed and accuracy. */ +export interface HnswAlgorithmConfiguration extends VectorSearchAlgorithmConfiguration { + /** Contains the parameters specific to HNSW algorithm. */ + parameters?: HnswParameters; + /** The name of the kind of algorithm being configured for use with vector search. */ + kind: "hnsw"; +} + +export function hnswAlgorithmConfigurationSerializer(item: HnswAlgorithmConfiguration): any { + return { + name: item["name"], + kind: item["kind"], + hnswParameters: !item["parameters"] + ? item["parameters"] + : hnswParametersSerializer(item["parameters"]), + }; +} + +export function hnswAlgorithmConfigurationDeserializer(item: any): HnswAlgorithmConfiguration { + return { + name: item["name"], + kind: item["kind"], + parameters: !item["hnswParameters"] + ? item["hnswParameters"] + : hnswParametersDeserializer(item["hnswParameters"]), + }; +} + +/** Contains the parameters specific to the HNSW algorithm. */ +export interface HnswParameters { + /** The number of bi-directional links created for every new element during construction. Increasing this parameter value may improve recall and reduce retrieval times for datasets with high intrinsic dimensionality at the expense of increased memory consumption and longer indexing time. */ + m?: number; + /** The size of the dynamic list containing the nearest neighbors, which is used during index time. Increasing this parameter may improve index quality, at the expense of increased indexing time. At a certain point, increasing this parameter leads to diminishing returns. */ + efConstruction?: number; + /** The size of the dynamic list containing the nearest neighbors, which is used during search time. Increasing this parameter may improve search results, at the expense of slower search. At a certain point, increasing this parameter leads to diminishing returns. */ + efSearch?: number; + /** The similarity metric to use for vector comparisons. */ + metric?: VectorSearchAlgorithmMetric; +} + +export function hnswParametersSerializer(item: HnswParameters): any { + return { + m: item["m"], + efConstruction: item["efConstruction"], + efSearch: item["efSearch"], + metric: item["metric"], + }; +} + +export function hnswParametersDeserializer(item: any): HnswParameters { + return { + m: item["m"], + efConstruction: item["efConstruction"], + efSearch: item["efSearch"], + metric: item["metric"], + }; +} + +/** The similarity metric to use for vector comparisons. It is recommended to choose the same similarity metric as the embedding model was trained on. */ +export enum KnownVectorSearchAlgorithmMetric { + /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */ + Cosine = "cosine", + /** Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. */ + Euclidean = "euclidean", + /** Calculates the sum of element-wise products to gauge alignment and magnitude similarity. 
The larger and more positive, the closer the similarity. */
+  DotProduct = "dotProduct",
+  /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */
+  Hamming = "hamming",
+}
+
+/**
+ * The similarity metric to use for vector comparisons. It is recommended to choose the same similarity metric as the embedding model was trained on. \
+ * {@link KnownVectorSearchAlgorithmMetric} can be used interchangeably with VectorSearchAlgorithmMetric,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **cosine**: Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. \
+ * **euclidean**: Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. \
+ * **dotProduct**: Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. \
+ * **hamming**: Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity.
+ */
+export type VectorSearchAlgorithmMetric = string;
+
+/** Contains configuration options specific to the exhaustive KNN algorithm used during querying, which will perform brute-force search across the entire vector index. */
+export interface ExhaustiveKnnAlgorithmConfiguration extends VectorSearchAlgorithmConfiguration {
+  /** Contains the parameters specific to exhaustive KNN algorithm. */
+  parameters?: ExhaustiveKnnParameters;
+  /** The name of the kind of algorithm being configured for use with vector search. */
+  kind: "exhaustiveKnn";
+}
+
+export function exhaustiveKnnAlgorithmConfigurationSerializer(
+  item: ExhaustiveKnnAlgorithmConfiguration,
+): any {
+  return {
+    name: item["name"],
+    kind: item["kind"],
+    exhaustiveKnnParameters: !item["parameters"]
+      ? item["parameters"]
+      : exhaustiveKnnParametersSerializer(item["parameters"]),
+  };
+}
+
+export function exhaustiveKnnAlgorithmConfigurationDeserializer(
+  item: any,
+): ExhaustiveKnnAlgorithmConfiguration {
+  return {
+    name: item["name"],
+    kind: item["kind"],
+    parameters: !item["exhaustiveKnnParameters"]
+      ? item["exhaustiveKnnParameters"]
+      : exhaustiveKnnParametersDeserializer(item["exhaustiveKnnParameters"]),
+  };
+}
+
+/** Contains the parameters specific to exhaustive KNN algorithm. */
+export interface ExhaustiveKnnParameters {
+  /** The similarity metric to use for vector comparisons. */
+  metric?: VectorSearchAlgorithmMetric;
+}
+
+export function exhaustiveKnnParametersSerializer(item: ExhaustiveKnnParameters): any {
+  return { metric: item["metric"] };
+}
+
+export function exhaustiveKnnParametersDeserializer(item: any): ExhaustiveKnnParameters {
+  return {
+    metric: item["metric"],
+  };
+}
+
+export function vectorSearchVectorizerUnionArraySerializer(
+  result: Array<VectorSearchVectorizerUnion>,
+): any[] {
+  return result.map((item) => {
+    return vectorSearchVectorizerUnionSerializer(item);
+  });
+}
+
+export function vectorSearchVectorizerUnionArrayDeserializer(
+  result: Array<VectorSearchVectorizerUnion>,
+): any[] {
+  return result.map((item) => {
+    return vectorSearchVectorizerUnionDeserializer(item);
+  });
+}
+
+/** Specifies the vectorization method to be used during query time.
*/ +export interface VectorSearchVectorizer { + /** The name to associate with this particular vectorization method. */ + vectorizerName: string; + /** Type of VectorSearchVectorizer. */ + /** The discriminator possible values: azureOpenAI, customWebApi, aiServicesVision, aml */ + kind: VectorSearchVectorizerKind; +} + +export function vectorSearchVectorizerSerializer(item: VectorSearchVectorizer): any { + return { name: item["vectorizerName"], kind: item["kind"] }; +} + +export function vectorSearchVectorizerDeserializer(item: any): VectorSearchVectorizer { + return { + vectorizerName: item["name"], + kind: item["kind"], + }; +} + +/** Alias for VectorSearchVectorizerUnion */ +export type VectorSearchVectorizerUnion = + | AzureOpenAIVectorizer + | WebApiVectorizer + | AIServicesVisionVectorizer + | AzureMachineLearningVectorizer + | VectorSearchVectorizer; + +export function vectorSearchVectorizerUnionSerializer(item: VectorSearchVectorizerUnion): any { + switch (item.kind) { + case "azureOpenAI": + return azureOpenAIVectorizerSerializer(item as AzureOpenAIVectorizer); + + case "customWebApi": + return webApiVectorizerSerializer(item as WebApiVectorizer); + + case "aiServicesVision": + return aiServicesVisionVectorizerSerializer(item as AIServicesVisionVectorizer); + + case "aml": + return azureMachineLearningVectorizerSerializer(item as AzureMachineLearningVectorizer); + + default: + return vectorSearchVectorizerSerializer(item); + } +} + +export function vectorSearchVectorizerUnionDeserializer(item: any): VectorSearchVectorizerUnion { + switch (item.kind) { + case "azureOpenAI": + return azureOpenAIVectorizerDeserializer(item as AzureOpenAIVectorizer); + + case "customWebApi": + return webApiVectorizerDeserializer(item as WebApiVectorizer); + + case "aiServicesVision": + return aiServicesVisionVectorizerDeserializer(item as AIServicesVisionVectorizer); + + case "aml": + return azureMachineLearningVectorizerDeserializer(item as AzureMachineLearningVectorizer); + + default: + return vectorSearchVectorizerDeserializer(item); + } +} + +/** The vectorization method to be used during query time. */ +export enum KnownVectorSearchVectorizerKind { + /** Generate embeddings using an Azure OpenAI resource at query time. */ + AzureOpenAI = "azureOpenAI", + /** Generate embeddings using a custom web endpoint at query time. */ + CustomWebApi = "customWebApi", + /** Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. */ + AIServicesVision = "aiServicesVision", + /** Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog at query time. */ + AML = "aml", +} + +/** + * The vectorization method to be used during query time. \ + * {@link KnownVectorSearchVectorizerKind} can be used interchangeably with VectorSearchVectorizerKind, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \ + * **customWebApi**: Generate embeddings using a custom web endpoint at query time. \ + * **aiServicesVision**: Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. \ + * **aml**: Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog at query time. 
+ */ +export type VectorSearchVectorizerKind = string; + +/** Specifies the Azure OpenAI resource used to vectorize a query string. */ +export interface AzureOpenAIVectorizer extends VectorSearchVectorizer { + /** Contains the parameters specific to Azure OpenAI embedding vectorization. */ + parameters?: AzureOpenAIVectorizerParameters; + /** The name of the kind of vectorization method being configured for use with vector search. */ + kind: "azureOpenAI"; +} + +export function azureOpenAIVectorizerSerializer(item: AzureOpenAIVectorizer): any { + return { + name: item["vectorizerName"], + kind: item["kind"], + azureOpenAIParameters: !item["parameters"] + ? item["parameters"] + : azureOpenAIVectorizerParametersSerializer(item["parameters"]), + }; +} + +export function azureOpenAIVectorizerDeserializer(item: any): AzureOpenAIVectorizer { + return { + vectorizerName: item["name"], + kind: item["kind"], + parameters: !item["azureOpenAIParameters"] + ? item["azureOpenAIParameters"] + : azureOpenAIVectorizerParametersDeserializer(item["azureOpenAIParameters"]), + }; +} + +/** Specifies the parameters for connecting to the Azure OpenAI resource. */ +export interface AzureOpenAIVectorizerParameters { + /** The resource URI of the Azure OpenAI resource. */ + resourceUrl?: string; + /** ID of the Azure OpenAI model deployment on the designated resource. */ + deploymentName?: string; + /** API key of the designated Azure OpenAI resource. */ + apiKey?: string; + /** The user-assigned managed identity used for outbound connections. */ + authIdentity?: SearchIndexerDataIdentityUnion; + /** The name of the embedding model that is deployed at the provided deploymentId path. */ + modelName?: AzureOpenAIModelName; +} + +export function azureOpenAIVectorizerParametersSerializer( + item: AzureOpenAIVectorizerParameters, +): any { + return { + resourceUri: item["resourceUrl"], + deploymentId: item["deploymentName"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]), + modelName: item["modelName"], + }; +} + +export function azureOpenAIVectorizerParametersDeserializer( + item: any, +): AzureOpenAIVectorizerParameters { + return { + resourceUrl: item["resourceUri"], + deploymentName: item["deploymentId"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]), + modelName: item["modelName"], + }; +} + +/** The Azure Open AI model name that will be called. */ +export enum KnownAzureOpenAIModelName { + /** TextEmbeddingAda002 model. */ + TextEmbeddingAda002 = "text-embedding-ada-002", + /** TextEmbedding3Large model. */ + TextEmbedding3Large = "text-embedding-3-large", + /** TextEmbedding3Small model. */ + TextEmbedding3Small = "text-embedding-3-small", +} + +/** + * The Azure Open AI model name that will be called. \ + * {@link KnownAzureOpenAIModelName} can be used interchangeably with AzureOpenAIModelName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **text-embedding-ada-002**: TextEmbeddingAda002 model. \ + * **text-embedding-3-large**: TextEmbedding3Large model. \ + * **text-embedding-3-small**: TextEmbedding3Small model. + */ +export type AzureOpenAIModelName = string; + +/** Specifies a user-defined vectorizer for generating the vector embedding of a query string. 
Integration of an external vectorizer is achieved using the custom Web API interface of a skillset. */
+export interface WebApiVectorizer extends VectorSearchVectorizer {
+  /** Specifies the properties of the user-defined vectorizer. */
+  webApiParameters?: WebApiVectorizerParameters;
+  /** The name of the kind of vectorization method being configured for use with vector search. */
+  kind: "customWebApi";
+}
+
+export function webApiVectorizerSerializer(item: WebApiVectorizer): any {
+  return {
+    name: item["vectorizerName"],
+    kind: item["kind"],
+    customWebApiParameters: !item["webApiParameters"]
+      ? item["webApiParameters"]
+      : webApiVectorizerParametersSerializer(item["webApiParameters"]),
+  };
+}
+
+export function webApiVectorizerDeserializer(item: any): WebApiVectorizer {
+  return {
+    vectorizerName: item["name"],
+    kind: item["kind"],
+    webApiParameters: !item["customWebApiParameters"]
+      ? item["customWebApiParameters"]
+      : webApiVectorizerParametersDeserializer(item["customWebApiParameters"]),
+  };
+}
+
+/** Specifies the properties for connecting to a user-defined vectorizer. */
+export interface WebApiVectorizerParameters {
+  /** The URI of the Web API providing the vectorizer. */
+  url?: string;
+  /** The headers required to make the HTTP request. */
+  httpHeaders?: Record<string, string>;
+  /** The method for the HTTP request. */
+  httpMethod?: string;
+  /** The desired timeout for the request. Default is 30 seconds. */
+  timeout?: string;
+  /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */
+  authResourceId?: string;
+  /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
+  authIdentity?: SearchIndexerDataIdentityUnion;
+}
+
+export function webApiVectorizerParametersSerializer(item: WebApiVectorizerParameters): any {
+  return {
+    uri: item["url"],
+    httpHeaders: item["httpHeaders"],
+    httpMethod: item["httpMethod"],
+    timeout: item["timeout"],
+    authResourceId: item["authResourceId"],
+    authIdentity: !item["authIdentity"]
+      ? item["authIdentity"]
+      : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]),
+  };
+}
+
+export function webApiVectorizerParametersDeserializer(item: any): WebApiVectorizerParameters {
+  return {
+    url: item["uri"],
+    httpHeaders: item["httpHeaders"],
+    httpMethod: item["httpMethod"],
+    timeout: item["timeout"],
+    authResourceId: item["authResourceId"],
+    authIdentity: !item["authIdentity"]
+      ? item["authIdentity"]
+      : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]),
+  };
+}
+
+/** Specifies the AI Services Vision resource used to vectorize a query image or text. */
+export interface AIServicesVisionVectorizer extends VectorSearchVectorizer {
+  /** Contains the parameters specific to AI Services Vision embedding vectorization.
*/ + aiServicesVisionParameters?: AIServicesVisionParameters; + /** The name of the kind of vectorization method being configured for use with vector search. */ + kind: "aiServicesVision"; +} + +export function aiServicesVisionVectorizerSerializer(item: AIServicesVisionVectorizer): any { + return { + name: item["vectorizerName"], + kind: item["kind"], + AIServicesVisionParameters: !item["aiServicesVisionParameters"] + ? item["aiServicesVisionParameters"] + : aiServicesVisionParametersSerializer(item["aiServicesVisionParameters"]), + }; +} + +export function aiServicesVisionVectorizerDeserializer(item: any): AIServicesVisionVectorizer { + return { + vectorizerName: item["name"], + kind: item["kind"], + aiServicesVisionParameters: !item["AIServicesVisionParameters"] + ? item["AIServicesVisionParameters"] + : aiServicesVisionParametersDeserializer(item["AIServicesVisionParameters"]), + }; +} + +/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */ +export interface AIServicesVisionParameters { + /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */ + modelVersion: string; + /** The resource URI of the AI Services resource. */ + resourceUri: string; + /** API key of the designated AI Services resource. */ + apiKey?: string; + /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + authIdentity?: SearchIndexerDataIdentityUnion; +} + +export function aiServicesVisionParametersSerializer(item: AIServicesVisionParameters): any { + return { + modelVersion: item["modelVersion"], + resourceUri: item["resourceUri"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]), + }; +} + +export function aiServicesVisionParametersDeserializer(item: any): AIServicesVisionParameters { + return { + modelVersion: item["modelVersion"], + resourceUri: item["resourceUri"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]), + }; +} + +/** Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog for generating the vector embedding of a query string. */ +export interface AzureMachineLearningVectorizer extends VectorSearchVectorizer { + /** Specifies the properties of the AML vectorizer. */ + amlParameters?: AzureMachineLearningParameters; + /** The name of the kind of vectorization method being configured for use with vector search. */ + kind: "aml"; +} + +export function azureMachineLearningVectorizerSerializer( + item: AzureMachineLearningVectorizer, +): any { + return { + name: item["vectorizerName"], + kind: item["kind"], + amlParameters: !item["amlParameters"] + ? item["amlParameters"] + : azureMachineLearningParametersSerializer(item["amlParameters"]), + }; +} + +export function azureMachineLearningVectorizerDeserializer( + item: any, +): AzureMachineLearningVectorizer { + return { + vectorizerName: item["name"], + kind: item["kind"], + amlParameters: !item["amlParameters"] + ? 
item["amlParameters"] + : azureMachineLearningParametersDeserializer(item["amlParameters"]), + }; +} + +/** Specifies the properties for connecting to an AML vectorizer. */ +export interface AzureMachineLearningParameters { + /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */ + scoringUri: string; + /** (Required for key authentication) The key for the AML service. */ + authenticationKey?: string; + /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */ + resourceId?: string; + /** (Optional) When specified, indicates the timeout for the http client making the API call. */ + timeout?: string; + /** (Optional for token authentication). The region the AML service is deployed in. */ + region?: string; + /** The name of the embedding model from the Azure AI Foundry Catalog that is deployed at the provided endpoint. */ + modelName?: AIFoundryModelCatalogName; +} + +export function azureMachineLearningParametersSerializer( + item: AzureMachineLearningParameters, +): any { + return { + uri: item["scoringUri"], + key: item["authenticationKey"], + resourceId: item["resourceId"], + timeout: item["timeout"], + region: item["region"], + modelName: item["modelName"], + }; +} + +export function azureMachineLearningParametersDeserializer( + item: any, +): AzureMachineLearningParameters { + return { + scoringUri: item["uri"], + authenticationKey: item["key"], + resourceId: item["resourceId"], + timeout: item["timeout"], + region: item["region"], + modelName: item["modelName"], + }; +} + +/** The name of the embedding model from the Azure AI Foundry Catalog that will be called. */ +export enum KnownAIFoundryModelCatalogName { + /** OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32 */ + OpenAiclipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", + /** OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336 */ + OpenAiclipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336", + /** Facebook-DinoV2-Image-Embeddings-ViT-Base */ + FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base", + /** Facebook-DinoV2-Image-Embeddings-ViT-Giant */ + FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant", + /** Cohere-embed-v3-english */ + CohereEmbedV3English = "Cohere-embed-v3-english", + /** Cohere-embed-v3-multilingual */ + CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual", +} + +/** + * The name of the embedding model from the Azure AI Foundry Catalog that will be called. \ + * {@link KnownAIFoundryModelCatalogName} can be used interchangeably with AIFoundryModelCatalogName, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service
+ * **OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32**: OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32 \
+ * **OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336**: OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336 \
+ * **Facebook-DinoV2-Image-Embeddings-ViT-Base**: Facebook-DinoV2-Image-Embeddings-ViT-Base \
+ * **Facebook-DinoV2-Image-Embeddings-ViT-Giant**: Facebook-DinoV2-Image-Embeddings-ViT-Giant \
+ * **Cohere-embed-v3-english**: Cohere-embed-v3-english \
+ * **Cohere-embed-v3-multilingual**: Cohere-embed-v3-multilingual
+ */
+export type AIFoundryModelCatalogName = string;
+
+export function vectorSearchCompressionUnionArraySerializer(
+  result: Array<VectorSearchCompressionUnion>,
+): any[] {
+  return result.map((item) => {
+    return vectorSearchCompressionUnionSerializer(item);
+  });
+}
+
+export function vectorSearchCompressionUnionArrayDeserializer(
+  result: Array<VectorSearchCompressionUnion>,
+): any[] {
+  return result.map((item) => {
+    return vectorSearchCompressionUnionDeserializer(item);
+  });
+}
+
+/** Contains configuration options specific to the compression method used during indexing or querying. */
+export interface VectorSearchCompression {
+  /** The name to associate with this particular configuration. */
+  compressionName: string;
+  /** If set to true, once the ordered set of results calculated using compressed vectors is obtained, the results will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */
+  rerankWithOriginalVectors?: boolean;
+  /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */
+  defaultOversampling?: number;
+  /** Contains the options for rescoring. */
+  rescoringOptions?: RescoringOptions;
+  /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should only be used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */
+  truncationDimension?: number;
+  /** Type of VectorSearchCompression. */
+  /** The discriminator possible values: scalarQuantization, binaryQuantization */
+  kind: VectorSearchCompressionKind;
+}
+
+export function vectorSearchCompressionSerializer(item: VectorSearchCompression): any {
+  return {
+    name: item["compressionName"],
+    rerankWithOriginalVectors: item["rerankWithOriginalVectors"],
+    defaultOversampling: item["defaultOversampling"],
+    rescoringOptions: !item["rescoringOptions"]
+      ? item["rescoringOptions"]
+      : rescoringOptionsSerializer(item["rescoringOptions"]),
+    truncationDimension: item["truncationDimension"],
+    kind: item["kind"],
+  };
+}
+
+export function vectorSearchCompressionDeserializer(item: any): VectorSearchCompression {
+  return {
+    compressionName: item["name"],
+    rerankWithOriginalVectors: item["rerankWithOriginalVectors"],
+    defaultOversampling: item["defaultOversampling"],
+    rescoringOptions: !item["rescoringOptions"]
+      ?
item["rescoringOptions"] + : rescoringOptionsDeserializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + }; +} + +/** Alias for VectorSearchCompressionUnion */ +export type VectorSearchCompressionUnion = + | ScalarQuantizationCompression + | BinaryQuantizationCompression + | VectorSearchCompression; + +export function vectorSearchCompressionUnionSerializer(item: VectorSearchCompressionUnion): any { + switch (item.kind) { + case "scalarQuantization": + return scalarQuantizationCompressionSerializer(item as ScalarQuantizationCompression); + + case "binaryQuantization": + return binaryQuantizationCompressionSerializer(item as BinaryQuantizationCompression); + + default: + return vectorSearchCompressionSerializer(item); + } +} + +export function vectorSearchCompressionUnionDeserializer(item: any): VectorSearchCompressionUnion { + switch (item.kind) { + case "scalarQuantization": + return scalarQuantizationCompressionDeserializer(item as ScalarQuantizationCompression); + + case "binaryQuantization": + return binaryQuantizationCompressionDeserializer(item as BinaryQuantizationCompression); + + default: + return vectorSearchCompressionDeserializer(item); + } +} + +/** Contains the options for rescoring. */ +export interface RescoringOptions { + /** If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency. */ + enableRescoring?: boolean; + /** Default oversampling factor. Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values improve recall at the expense of latency. */ + defaultOversampling?: number; + /** Controls the storage method for original vectors. This setting is immutable. */ + rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod; +} + +export function rescoringOptionsSerializer(item: RescoringOptions): any { + return { + enableRescoring: item["enableRescoring"], + defaultOversampling: item["defaultOversampling"], + rescoreStorageMethod: item["rescoreStorageMethod"], + }; +} + +export function rescoringOptionsDeserializer(item: any): RescoringOptions { + return { + enableRescoring: item["enableRescoring"], + defaultOversampling: item["defaultOversampling"], + rescoreStorageMethod: item["rescoreStorageMethod"], + }; +} + +/** The storage method for the original full-precision vectors used for rescoring and internal index operations. */ +export enum KnownVectorSearchCompressionRescoreStorageMethod { + /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. */ + PreserveOriginals = "preserveOriginals", + /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */ + DiscardOriginals = "discardOriginals", +} + +/** + * The storage method for the original full-precision vectors used for rescoring and internal index operations. 
+ * {@link KnownVectorSearchCompressionRescoreStorageMethod} can be used interchangeably with VectorSearchCompressionRescoreStorageMethod,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **preserveOriginals**: This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. \
+ * **discardOriginals**: This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality.
+ */
+export type VectorSearchCompressionRescoreStorageMethod = string;
+
+/** The compression method used for indexing and querying. */
+export enum KnownVectorSearchCompressionKind {
+ /** Scalar Quantization, a type of compression method. In scalar quantization, the original vector values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */
+ ScalarQuantization = "scalarQuantization",
+ /** Binary Quantization, a type of compression method. In binary quantization, the original vector values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. */
+ BinaryQuantization = "binaryQuantization",
+}
+
+/**
+ * The compression method used for indexing and querying. \
+ * {@link KnownVectorSearchCompressionKind} can be used interchangeably with VectorSearchCompressionKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vector values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. \
+ * **binaryQuantization**: Binary Quantization, a type of compression method. In binary quantization, the original vector values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size.
+ */
+export type VectorSearchCompressionKind = string;
+
+/** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */
+export interface ScalarQuantizationCompression extends VectorSearchCompression {
+ /** Contains the parameters specific to Scalar Quantization. */
+ parameters?: ScalarQuantizationParameters;
+ /** The name of the kind of compression method being configured for use with vector search. */
+ kind: "scalarQuantization";
+}
+
+export function scalarQuantizationCompressionSerializer(item: ScalarQuantizationCompression): any {
+ return {
+ name: item["compressionName"],
+ rerankWithOriginalVectors: item["rerankWithOriginalVectors"],
+ defaultOversampling: item["defaultOversampling"],
+ rescoringOptions: !item["rescoringOptions"]
+ ? item["rescoringOptions"]
+ : rescoringOptionsSerializer(item["rescoringOptions"]),
+ truncationDimension: item["truncationDimension"],
+ kind: item["kind"],
+ scalarQuantizationParameters: !item["parameters"]
+ ?
item["parameters"] + : scalarQuantizationParametersSerializer(item["parameters"]), + }; +} + +export function scalarQuantizationCompressionDeserializer( + item: any, +): ScalarQuantizationCompression { + return { + compressionName: item["name"], + rerankWithOriginalVectors: item["rerankWithOriginalVectors"], + defaultOversampling: item["defaultOversampling"], + rescoringOptions: !item["rescoringOptions"] + ? item["rescoringOptions"] + : rescoringOptionsDeserializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + parameters: !item["scalarQuantizationParameters"] + ? item["scalarQuantizationParameters"] + : scalarQuantizationParametersDeserializer(item["scalarQuantizationParameters"]), + }; +} + +/** Contains the parameters specific to Scalar Quantization. */ +export interface ScalarQuantizationParameters { + /** The quantized data type of compressed vector values. */ + quantizedDataType?: VectorSearchCompressionTarget; +} + +export function scalarQuantizationParametersSerializer(item: ScalarQuantizationParameters): any { + return { quantizedDataType: item["quantizedDataType"] }; +} + +export function scalarQuantizationParametersDeserializer(item: any): ScalarQuantizationParameters { + return { + quantizedDataType: item["quantizedDataType"], + }; +} + +/** The quantized data type of compressed vector values. */ +export enum KnownVectorSearchCompressionTarget { + /** 8-bit signed integer. */ + Int8 = "int8", +} + +/** + * The quantized data type of compressed vector values. \ + * {@link KnownVectorSearchCompressionTarget} can be used interchangeably with VectorSearchCompressionTarget, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **int8**: 8-bit signed integer. + */ +export type VectorSearchCompressionTarget = string; + +/** Contains configuration options specific to the binary quantization compression method used during indexing and querying. */ +export interface BinaryQuantizationCompression extends VectorSearchCompression { + /** The name of the kind of compression method being configured for use with vector search. */ + kind: "binaryQuantization"; +} + +export function binaryQuantizationCompressionSerializer(item: BinaryQuantizationCompression): any { + return { + name: item["compressionName"], + rerankWithOriginalVectors: item["rerankWithOriginalVectors"], + defaultOversampling: item["defaultOversampling"], + rescoringOptions: !item["rescoringOptions"] + ? item["rescoringOptions"] + : rescoringOptionsSerializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + }; +} + +export function binaryQuantizationCompressionDeserializer( + item: any, +): BinaryQuantizationCompression { + return { + compressionName: item["name"], + rerankWithOriginalVectors: item["rerankWithOriginalVectors"], + defaultOversampling: item["defaultOversampling"], + rescoringOptions: !item["rescoringOptions"] + ? item["rescoringOptions"] + : rescoringOptionsDeserializer(item["rescoringOptions"]), + truncationDimension: item["truncationDimension"], + kind: item["kind"], + }; +} + +/** A value indicating whether permission filtering is enabled for the index. */ +export enum KnownSearchIndexPermissionFilterOption { + /** enabled. */ + Enabled = "enabled", + /** disabled. */ + Disabled = "disabled", +} + +/** + * A value indicating whether permission filtering is enabled for the index. 
 \
+ * {@link KnownSearchIndexPermissionFilterOption} can be used interchangeably with SearchIndexPermissionFilterOption,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **enabled**: enabled. \
+ * **disabled**: disabled.
+ */
+export type SearchIndexPermissionFilterOption = string;
+
+/** Response from a List Indexes request. If successful, it includes the full definitions of all indexes. */
+export interface _ListIndexesResult {
+ /** The indexes in the Search service. */
+ readonly indexes: SearchIndex[];
+}
+
+export function _listIndexesResultDeserializer(item: any): _ListIndexesResult {
+ return {
+ indexes: searchIndexArrayDeserializer(item["value"]),
+ };
+}
+
+export function searchIndexArraySerializer(result: Array<SearchIndex>): any[] {
+ return result.map((item) => {
+ return searchIndexSerializer(item);
+ });
+}
+
+export function searchIndexArrayDeserializer(result: Array<SearchIndex>): any[] {
+ return result.map((item) => {
+ return searchIndexDeserializer(item);
+ });
+}
+
+/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */
+export interface GetIndexStatisticsResult {
+ /** The number of documents in the index. */
+ documentCount: number;
+ /** The amount of storage in bytes consumed by the index. */
+ storageSize: number;
+ /** The amount of memory in bytes consumed by vectors in the index. */
+ vectorIndexSize: number;
+}
+
+export function getIndexStatisticsResultDeserializer(item: any): GetIndexStatisticsResult {
+ return {
+ documentCount: item["documentCount"],
+ storageSize: item["storageSize"],
+ vectorIndexSize: item["vectorIndexSize"],
+ };
+}
+
+/** Specifies some text and analysis components used to break that text into tokens. */
+export interface AnalyzeTextOptions {
+ /** The text to break into tokens. */
+ text: string;
+ /** The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. */
+ analyzerName?: LexicalAnalyzerName;
+ /** The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. */
+ tokenizerName?: LexicalTokenizerName;
+ /** The name of the normalizer to use to normalize the given text. */
+ normalizerName?: LexicalNormalizerName;
+ /** An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */
+ tokenFilters?: TokenFilterName[];
+ /** An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */
+ charFilters?: CharFilterName[];
+}
+
+export function analyzeTextOptionsSerializer(item: AnalyzeTextOptions): any {
+ return {
+ text: item["text"],
+ analyzer: item["analyzerName"],
+ tokenizer: item["tokenizerName"],
+ normalizer: item["normalizerName"],
+ tokenFilters: !item["tokenFilters"]
+ ? item["tokenFilters"]
+ : item["tokenFilters"].map((p: any) => {
+ return p;
+ }),
+ charFilters: !item["charFilters"]
+ ? item["charFilters"]
+ : item["charFilters"].map((p: any) => {
+ return p;
+ }),
+ };
+}
+
+/** The result of testing an analyzer on text. */
+export interface AnalyzeResult {
+ /** The list of tokens returned by the analyzer specified in the request.
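+ * For example, analyzing the text "The quick brown fox" with a whitespace-style tokenizer yields entries along these lines (values are illustrative):
+ * `{ token: "quick", startOffset: 4, endOffset: 9, position: 1 }`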
 */
+ tokens: AnalyzedTokenInfo[];
+}
+
+export function analyzeResultDeserializer(item: any): AnalyzeResult {
+ return {
+ tokens: analyzedTokenInfoArrayDeserializer(item["tokens"]),
+ };
+}
+
+export function analyzedTokenInfoArraySerializer(result: Array<AnalyzedTokenInfo>): any[] {
+ return result.map((item) => {
+ return analyzedTokenInfoSerializer(item);
+ });
+}
+
+export function analyzedTokenInfoArrayDeserializer(result: Array<AnalyzedTokenInfo>): any[] {
+ return result.map((item) => {
+ return analyzedTokenInfoDeserializer(item);
+ });
+}
+
+/** Information about a token returned by an analyzer. */
+export interface AnalyzedTokenInfo {
+ /** The token returned by the analyzer. */
+ token: string;
+ /** The index of the first character of the token in the input text. */
+ startOffset: number;
+ /** The index of the last character of the token in the input text. */
+ endOffset: number;
+ /** The position of the token in the input text relative to other tokens. The first token in the input text has position 0, the next has position 1, and so on. Depending on the analyzer used, some tokens might have the same position, for example if they are synonyms of each other. */
+ position: number;
+}
+
+export function analyzedTokenInfoSerializer(item: AnalyzedTokenInfo): any {
+ return {
+ token: item["token"],
+ startOffset: item["startOffset"],
+ endOffset: item["endOffset"],
+ position: item["position"],
+ };
+}
+
+export function analyzedTokenInfoDeserializer(item: any): AnalyzedTokenInfo {
+ return {
+ token: item["token"],
+ startOffset: item["startOffset"],
+ endOffset: item["endOffset"],
+ position: item["position"],
+ };
+}
+
+/** Represents an index alias, which describes a mapping from the alias name to an index. The alias name can be used in place of the index name for supported operations. */
+export interface SearchAlias {
+ /** The name of the alias. */
+ name: string;
+ /** The name of the index this alias maps to. Only one index name may be specified. */
+ indexes: string[];
+ /** The ETag of the alias. */
+ eTag?: string;
+}
+
+export function searchAliasSerializer(item: SearchAlias): any {
+ return {
+ name: item["name"],
+ indexes: item["indexes"].map((p: any) => {
+ return p;
+ }),
+ "@odata.etag": item["eTag"],
+ };
+}
+
+export function searchAliasDeserializer(item: any): SearchAlias {
+ return {
+ name: item["name"],
+ indexes: item["indexes"].map((p: any) => {
+ return p;
+ }),
+ eTag: item["@odata.etag"],
+ };
+}
+
+/** Response from a List Aliases request. If successful, it includes the associated index mappings for all aliases. */
+export interface _ListAliasesResult {
+ /** The aliases in the Search service. */
+ readonly aliases: SearchAlias[];
+}
+
+export function _listAliasesResultDeserializer(item: any): _ListAliasesResult {
+ return {
+ aliases: searchAliasArrayDeserializer(item["value"]),
+ };
+}
+
+export function searchAliasArraySerializer(result: Array<SearchAlias>): any[] {
+ return result.map((item) => {
+ return searchAliasSerializer(item);
+ });
+}
+
+export function searchAliasArrayDeserializer(result: Array<SearchAlias>): any[] {
+ return result.map((item) => {
+ return searchAliasDeserializer(item);
+ });
+}
+
+/** Represents a knowledge base definition. */
+export interface KnowledgeBase {
+ /** The name of the knowledge base. */
+ readonly name: string;
+ /** Knowledge sources referenced by this knowledge base. */
+ knowledgeSources: KnowledgeSourceReference[];
+ /** Contains configuration options on how to connect to AI models.
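+ * For example, an Azure OpenAI planning model can be declared as follows (resource and deployment names are placeholders):
+ * `[{ kind: "azureOpenAI", azureOpenAIParameters: { resourceUri: "https://my-openai.openai.azure.com", deploymentId: "my-gpt-deployment" } }]`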
 */
+ models?: KnowledgeBaseModelUnion[];
+ /** The retrieval reasoning effort configuration. */
+ retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion;
+ /** The output mode for the knowledge base. */
+ outputMode?: KnowledgeRetrievalOutputMode;
+ /** The ETag of the knowledge base. */
+ eTag?: string;
+ /** A description of an encryption key that you create in Azure Key Vault. */
+ encryptionKey?: SearchResourceEncryptionKey;
+ /** The description of the knowledge base. */
+ description?: string;
+ /** Instructions considered by the knowledge base when developing the query plan. */
+ retrievalInstructions?: string;
+ /** Instructions considered by the knowledge base when generating answers. */
+ answerInstructions?: string;
+}
+
+export function knowledgeBaseSerializer(item: KnowledgeBase): any {
+ return {
+ knowledgeSources: knowledgeSourceReferenceArraySerializer(item["knowledgeSources"]),
+ models: !item["models"]
+ ? item["models"]
+ : knowledgeBaseModelUnionArraySerializer(item["models"]),
+ retrievalReasoningEffort: !item["retrievalReasoningEffort"]
+ ? item["retrievalReasoningEffort"]
+ : knowledgeRetrievalReasoningEffortUnionSerializer(item["retrievalReasoningEffort"]),
+ outputMode: item["outputMode"],
+ "@odata.etag": item["eTag"],
+ encryptionKey: !item["encryptionKey"]
+ ? item["encryptionKey"]
+ : searchResourceEncryptionKeySerializer(item["encryptionKey"]),
+ description: item["description"],
+ retrievalInstructions: item["retrievalInstructions"],
+ answerInstructions: item["answerInstructions"],
+ };
+}
+
+export function knowledgeBaseDeserializer(item: any): KnowledgeBase {
+ return {
+ name: item["name"],
+ knowledgeSources: knowledgeSourceReferenceArrayDeserializer(item["knowledgeSources"]),
+ models: !item["models"]
+ ? item["models"]
+ : knowledgeBaseModelUnionArrayDeserializer(item["models"]),
+ retrievalReasoningEffort: !item["retrievalReasoningEffort"]
+ ? item["retrievalReasoningEffort"]
+ : knowledgeRetrievalReasoningEffortUnionDeserializer(item["retrievalReasoningEffort"]),
+ outputMode: item["outputMode"],
+ eTag: item["@odata.etag"],
+ encryptionKey: !item["encryptionKey"]
+ ? item["encryptionKey"]
+ : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]),
+ description: item["description"],
+ retrievalInstructions: item["retrievalInstructions"],
+ answerInstructions: item["answerInstructions"],
+ };
+}
+
+export function knowledgeSourceReferenceArraySerializer(
+ result: Array<KnowledgeSourceReference>,
+): any[] {
+ return result.map((item) => {
+ return knowledgeSourceReferenceSerializer(item);
+ });
+}
+
+export function knowledgeSourceReferenceArrayDeserializer(
+ result: Array<KnowledgeSourceReference>,
+): any[] {
+ return result.map((item) => {
+ return knowledgeSourceReferenceDeserializer(item);
+ });
+}
+
+/** Reference to a knowledge source. */
+export interface KnowledgeSourceReference {
+ /** The name of the knowledge source.
 */
+ name: string;
+}
+
+export function knowledgeSourceReferenceSerializer(item: KnowledgeSourceReference): any {
+ return { name: item["name"] };
+}
+
+export function knowledgeSourceReferenceDeserializer(item: any): KnowledgeSourceReference {
+ return {
+ name: item["name"],
+ };
+}
+
+export function knowledgeBaseModelUnionArraySerializer(
+ result: Array<KnowledgeBaseModelUnion>,
+): any[] {
+ return result.map((item) => {
+ return knowledgeBaseModelUnionSerializer(item);
+ });
+}
+
+export function knowledgeBaseModelUnionArrayDeserializer(
+ result: Array<KnowledgeBaseModelUnion>,
+): any[] {
+ return result.map((item) => {
+ return knowledgeBaseModelUnionDeserializer(item);
+ });
+}
+
+/** Specifies the connection parameters for the model to use for query planning. */
+export interface KnowledgeBaseModel {
+ /** The AI model to be used for query planning. */
+ /** The discriminator possible values: azureOpenAI */
+ kind: KnowledgeBaseModelKind;
+}
+
+export function knowledgeBaseModelSerializer(item: KnowledgeBaseModel): any {
+ return { kind: item["kind"] };
+}
+
+export function knowledgeBaseModelDeserializer(item: any): KnowledgeBaseModel {
+ return {
+ kind: item["kind"],
+ };
+}
+
+/** Alias for KnowledgeBaseModelUnion */
+export type KnowledgeBaseModelUnion = KnowledgeBaseAzureOpenAIModel | KnowledgeBaseModel;
+
+export function knowledgeBaseModelUnionSerializer(item: KnowledgeBaseModelUnion): any {
+ switch (item.kind) {
+ case "azureOpenAI":
+ return knowledgeBaseAzureOpenAIModelSerializer(item as KnowledgeBaseAzureOpenAIModel);
+
+ default:
+ return knowledgeBaseModelSerializer(item);
+ }
+}
+
+export function knowledgeBaseModelUnionDeserializer(item: any): KnowledgeBaseModelUnion {
+ switch (item.kind) {
+ case "azureOpenAI":
+ return knowledgeBaseAzureOpenAIModelDeserializer(item as KnowledgeBaseAzureOpenAIModel);
+
+ default:
+ return knowledgeBaseModelDeserializer(item);
+ }
+}
+
+/** The AI model to be used for query planning. */
+export enum KnownKnowledgeBaseModelKind {
+ /** Use Azure Open AI models for query planning. */
+ AzureOpenAI = "azureOpenAI",
+}
+
+/**
+ * The AI model to be used for query planning. \
+ * {@link KnownKnowledgeBaseModelKind} can be used interchangeably with KnowledgeBaseModelKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **azureOpenAI**: Use Azure Open AI models for query planning.
+ */
+export type KnowledgeBaseModelKind = string;
+
+/** Specifies the Azure OpenAI resource used to do query planning. */
+export interface KnowledgeBaseAzureOpenAIModel extends KnowledgeBaseModel {
+ kind: "azureOpenAI";
+ /** Azure OpenAI parameters. */
+ azureOpenAIParameters: AzureOpenAiParameters;
+}
+
+export function knowledgeBaseAzureOpenAIModelSerializer(item: KnowledgeBaseAzureOpenAIModel): any {
+ return {
+ kind: item["kind"],
+ azureOpenAIParameters: azureOpenAiParametersSerializer(item["azureOpenAIParameters"]),
+ };
+}
+
+export function knowledgeBaseAzureOpenAIModelDeserializer(
+ item: any,
+): KnowledgeBaseAzureOpenAIModel {
+ return {
+ kind: item["kind"],
+ azureOpenAIParameters: azureOpenAiParametersDeserializer(item["azureOpenAIParameters"]),
+ };
+}
+
+/** Specifies the parameters for connecting to the Azure OpenAI resource. */
+export interface AzureOpenAiParameters {
+ /** The resource URI of the Azure OpenAI resource. */
+ resourceUri: string;
+ /** ID of the Azure OpenAI model deployment on the designated resource. */
+ deploymentId: string;
+ /** API key of the designated Azure OpenAI resource.
 */
+ apiKey?: string;
+ /** The user-assigned managed identity used for outbound connections. */
+ authIdentity?: string;
+ /** The name of the embedding model that is deployed at the provided deploymentId path. */
+ modelName?: AzureOpenAIModelName;
+ /** The authentication method to use when connecting to the Azure OpenAI resource. */
+ authenticationMethod?: string;
+}
+
+export function azureOpenAiParametersSerializer(item: AzureOpenAiParameters): any {
+ return {
+ resourceUri: item["resourceUri"],
+ deploymentId: item["deploymentId"],
+ apiKey: item["apiKey"],
+ authIdentity: item["authIdentity"],
+ modelName: item["modelName"],
+ authenticationMethod: item["authenticationMethod"],
+ };
+}
+
+export function azureOpenAiParametersDeserializer(item: any): AzureOpenAiParameters {
+ return {
+ resourceUri: item["resourceUri"],
+ deploymentId: item["deploymentId"],
+ apiKey: item["apiKey"],
+ authIdentity: item["authIdentity"],
+ modelName: item["modelName"],
+ authenticationMethod: item["authenticationMethod"],
+ };
+}
+
+/** Result from listing knowledge bases. */
+export interface _ListKnowledgeBasesResult {
+ /** The knowledge bases in the service. */
+ value: KnowledgeBase[];
+}
+
+export function _listKnowledgeBasesResultDeserializer(item: any): _ListKnowledgeBasesResult {
+ return {
+ value: knowledgeBaseArrayDeserializer(item["value"]),
+ };
+}
+
+export function knowledgeBaseArraySerializer(result: Array<KnowledgeBase>): any[] {
+ return result.map((item) => {
+ return knowledgeBaseSerializer(item);
+ });
+}
+
+export function knowledgeBaseArrayDeserializer(result: Array<KnowledgeBase>): any[] {
+ return result.map((item) => {
+ return knowledgeBaseDeserializer(item);
+ });
+}
+
+/** Represents a knowledge source definition. */
+export interface KnowledgeSource {
+ /** The name of the knowledge source. */
+ readonly name: string;
+ /** Optional user-defined description. */
+ description?: string;
+ /** The type of the knowledge source. */
+ /** The discriminator possible values: searchIndex, azureBlob, indexedSharePoint, indexedOneLake, web, remoteSharePoint */
+ kind: KnowledgeSourceKind;
+ /** The ETag of the knowledge source. */
+ eTag?: string;
+ /** A description of an encryption key that you create in Azure Key Vault. */
+ encryptionKey?: SearchResourceEncryptionKey;
+}
+
+export function knowledgeSourceSerializer(item: KnowledgeSource): any {
+ return {
+ description: item["description"],
+ kind: item["kind"],
+ "@odata.etag": item["eTag"],
+ encryptionKey: !item["encryptionKey"]
+ ? item["encryptionKey"]
+ : searchResourceEncryptionKeySerializer(item["encryptionKey"]),
+ };
+}
+
+export function knowledgeSourceDeserializer(item: any): KnowledgeSource {
+ return {
+ name: item["name"],
+ description: item["description"],
+ kind: item["kind"],
+ eTag: item["@odata.etag"],
+ encryptionKey: !item["encryptionKey"]
+ ?
item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + }; +} + +/** Alias for KnowledgeSourceUnion */ +export type KnowledgeSourceUnion = + | SearchIndexKnowledgeSource + | AzureBlobKnowledgeSource + | IndexedSharePointKnowledgeSource + | IndexedOneLakeKnowledgeSource + | WebKnowledgeSource + | RemoteSharePointKnowledgeSource + | KnowledgeSource; + +export function knowledgeSourceUnionSerializer(item: KnowledgeSourceUnion): any { + switch (item.kind) { + case "searchIndex": + return searchIndexKnowledgeSourceSerializer(item as SearchIndexKnowledgeSource); + + case "azureBlob": + return azureBlobKnowledgeSourceSerializer(item as AzureBlobKnowledgeSource); + + case "indexedSharePoint": + return indexedSharePointKnowledgeSourceSerializer(item as IndexedSharePointKnowledgeSource); + + case "indexedOneLake": + return indexedOneLakeKnowledgeSourceSerializer(item as IndexedOneLakeKnowledgeSource); + + case "web": + return webKnowledgeSourceSerializer(item as WebKnowledgeSource); + + case "remoteSharePoint": + return remoteSharePointKnowledgeSourceSerializer(item as RemoteSharePointKnowledgeSource); + + default: + return knowledgeSourceSerializer(item); + } +} + +export function knowledgeSourceUnionDeserializer(item: any): KnowledgeSourceUnion { + switch (item.kind) { + case "searchIndex": + return searchIndexKnowledgeSourceDeserializer(item as SearchIndexKnowledgeSource); + + case "azureBlob": + return azureBlobKnowledgeSourceDeserializer(item as AzureBlobKnowledgeSource); + + case "indexedSharePoint": + return indexedSharePointKnowledgeSourceDeserializer(item as IndexedSharePointKnowledgeSource); + + case "indexedOneLake": + return indexedOneLakeKnowledgeSourceDeserializer(item as IndexedOneLakeKnowledgeSource); + + case "web": + return webKnowledgeSourceDeserializer(item as WebKnowledgeSource); + + case "remoteSharePoint": + return remoteSharePointKnowledgeSourceDeserializer(item as RemoteSharePointKnowledgeSource); + + default: + return knowledgeSourceDeserializer(item); + } +} + +/** The kind of the knowledge source. */ +export enum KnownKnowledgeSourceKind { + /** A knowledge source that reads data from a Search Index. */ + SearchIndex = "searchIndex", + /** A knowledge source that read and ingest data from Azure Blob Storage to a Search Index. */ + AzureBlob = "azureBlob", + /** A knowledge source that reads data from indexed SharePoint. */ + IndexedSharePoint = "indexedSharePoint", + /** A knowledge source that reads data from indexed OneLake. */ + IndexedOneLake = "indexedOneLake", + /** A knowledge source that reads data from the web. */ + Web = "web", + /** A knowledge source that reads data from remote SharePoint. */ + RemoteSharePoint = "remoteSharePoint", +} + +/** + * The kind of the knowledge source. \ + * {@link KnownKnowledgeSourceKind} can be used interchangeably with KnowledgeSourceKind, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **searchIndex**: A knowledge source that reads data from a Search Index. \ + * **azureBlob**: A knowledge source that read and ingest data from Azure Blob Storage to a Search Index. \ + * **indexedSharePoint**: A knowledge source that reads data from indexed SharePoint. \ + * **indexedOneLake**: A knowledge source that reads data from indexed OneLake. \ + * **web**: A knowledge source that reads data from the web. \ + * **remoteSharePoint**: A knowledge source that reads data from remote SharePoint. 
+ */
+export type KnowledgeSourceKind = string;
+
+/** Knowledge Source targeting a search index. */
+export interface SearchIndexKnowledgeSource extends KnowledgeSource {
+ kind: "searchIndex";
+ /** The parameters for the knowledge source. */
+ searchIndexParameters: SearchIndexKnowledgeSourceParameters;
+}
+
+export function searchIndexKnowledgeSourceSerializer(item: SearchIndexKnowledgeSource): any {
+ return {
+ description: item["description"],
+ kind: item["kind"],
+ "@odata.etag": item["eTag"],
+ encryptionKey: !item["encryptionKey"]
+ ? item["encryptionKey"]
+ : searchResourceEncryptionKeySerializer(item["encryptionKey"]),
+ searchIndexParameters: searchIndexKnowledgeSourceParametersSerializer(
+ item["searchIndexParameters"],
+ ),
+ };
+}
+
+export function searchIndexKnowledgeSourceDeserializer(item: any): SearchIndexKnowledgeSource {
+ return {
+ name: item["name"],
+ description: item["description"],
+ kind: item["kind"],
+ eTag: item["@odata.etag"],
+ encryptionKey: !item["encryptionKey"]
+ ? item["encryptionKey"]
+ : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]),
+ searchIndexParameters: searchIndexKnowledgeSourceParametersDeserializer(
+ item["searchIndexParameters"],
+ ),
+ };
+}
+
+/** Parameters for search index knowledge source. */
+export interface SearchIndexKnowledgeSourceParameters {
+ /** The name of the Search index. */
+ searchIndexName: string;
+ /** Used to request additional fields for referenced source data. */
+ sourceDataSelect?: string;
+}
+
+export function searchIndexKnowledgeSourceParametersSerializer(
+ item: SearchIndexKnowledgeSourceParameters,
+): any {
+ return {
+ searchIndexName: item["searchIndexName"],
+ sourceDataSelect: item["sourceDataSelect"],
+ };
+}
+
+export function searchIndexKnowledgeSourceParametersDeserializer(
+ item: any,
+): SearchIndexKnowledgeSourceParameters {
+ return {
+ searchIndexName: item["searchIndexName"],
+ sourceDataSelect: item["sourceDataSelect"],
+ };
+}
+
+/** Configuration for Azure Blob Storage knowledge source. */
+export interface AzureBlobKnowledgeSource extends KnowledgeSource {
+ kind: "azureBlob";
+ /** The parameters for the knowledge source. */
+ azureBlobParameters: AzureBlobKnowledgeSourceParameters;
+}
+
+export function azureBlobKnowledgeSourceSerializer(item: AzureBlobKnowledgeSource): any {
+ return {
+ description: item["description"],
+ kind: item["kind"],
+ "@odata.etag": item["eTag"],
+ encryptionKey: !item["encryptionKey"]
+ ? item["encryptionKey"]
+ : searchResourceEncryptionKeySerializer(item["encryptionKey"]),
+ azureBlobParameters: azureBlobKnowledgeSourceParametersSerializer(item["azureBlobParameters"]),
+ };
+}
+
+export function azureBlobKnowledgeSourceDeserializer(item: any): AzureBlobKnowledgeSource {
+ return {
+ name: item["name"],
+ description: item["description"],
+ kind: item["kind"],
+ eTag: item["@odata.etag"],
+ encryptionKey: !item["encryptionKey"]
+ ? item["encryptionKey"]
+ : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]),
+ azureBlobParameters: azureBlobKnowledgeSourceParametersDeserializer(
+ item["azureBlobParameters"],
+ ),
+ };
+}
+
+/** Parameters for Azure Blob Storage knowledge source. */
+export interface AzureBlobKnowledgeSourceParameters {
+ /** An explicit identity to use for this knowledge source. */
+ identity?: SearchIndexerDataIdentityUnion;
+ /** Key-based connection string or the ResourceId format if using a managed identity. */
+ connectionString: string;
+ /** The name of the blob storage container.
*/ + containerName: string; + /** Optional folder path within the container. */ + folderPath?: string; + /** Optional vectorizer configuration for vectorizing content. */ + embeddingModel?: VectorSearchVectorizerUnion; + /** Optional chat completion model for image verbalization or context extraction. */ + chatCompletionModel?: KnowledgeBaseModelUnion; + /** Optional schedule for data ingestion. */ + ingestionSchedule?: IndexingSchedule; + /** Resources created by the knowledge source. */ + readonly createdResources?: CreatedResources; + /** Indicates whether image verbalization should be disabled. */ + disableImageVerbalization?: boolean; +} + +export function azureBlobKnowledgeSourceParametersSerializer( + item: AzureBlobKnowledgeSourceParameters, +): any { + return { + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + connectionString: item["connectionString"], + containerName: item["containerName"], + folderPath: item["folderPath"], + embeddingModel: !item["embeddingModel"] + ? item["embeddingModel"] + : vectorSearchVectorizerUnionSerializer(item["embeddingModel"]), + chatCompletionModel: !item["chatCompletionModel"] + ? item["chatCompletionModel"] + : knowledgeBaseModelUnionSerializer(item["chatCompletionModel"]), + ingestionSchedule: !item["ingestionSchedule"] + ? item["ingestionSchedule"] + : indexingScheduleSerializer(item["ingestionSchedule"]), + disableImageVerbalization: item["disableImageVerbalization"], + }; +} + +export function azureBlobKnowledgeSourceParametersDeserializer( + item: any, +): AzureBlobKnowledgeSourceParameters { + return { + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + connectionString: item["connectionString"], + containerName: item["containerName"], + folderPath: item["folderPath"], + embeddingModel: !item["embeddingModel"] + ? item["embeddingModel"] + : vectorSearchVectorizerUnionDeserializer(item["embeddingModel"]), + chatCompletionModel: !item["chatCompletionModel"] + ? item["chatCompletionModel"] + : knowledgeBaseModelUnionDeserializer(item["chatCompletionModel"]), + ingestionSchedule: !item["ingestionSchedule"] + ? item["ingestionSchedule"] + : indexingScheduleDeserializer(item["ingestionSchedule"]), + createdResources: !item["createdResources"] + ? item["createdResources"] + : createdResourcesDeserializer(item["createdResources"]), + disableImageVerbalization: item["disableImageVerbalization"], + }; +} + +/** Represents a schedule for indexer execution. */ +export interface IndexingSchedule { + /** The interval of time between indexer executions. */ + interval: string; + /** The time when an indexer should start running. */ + startTime?: Date; +} + +export function indexingScheduleSerializer(item: IndexingSchedule): any { + return { + interval: item["interval"], + startTime: !item["startTime"] ? item["startTime"] : item["startTime"].toISOString(), + }; +} + +export function indexingScheduleDeserializer(item: any): IndexingSchedule { + return { + interval: item["interval"], + startTime: !item["startTime"] ? item["startTime"] : new Date(item["startTime"]), + }; +} + +/** Resources created by the knowledge source. Keys represent resource types (e.g., 'datasource', 'indexer', 'skillset', 'index') and values represent resource names. 
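+ * For example (names are illustrative): `{ "index": "my-source-index", "indexer": "my-source-indexer" }`.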
 */
+export interface CreatedResources {
+ /** Additional properties */
+ additionalProperties?: Record<string, string>;
+}
+
+export function createdResourcesSerializer(item: CreatedResources): any {
+ return { ...serializeRecord(item.additionalProperties ?? {}) };
+}
+
+export function createdResourcesDeserializer(item: any): CreatedResources {
+ return {
+ additionalProperties: serializeRecord(item, []),
+ };
+}
+
+/** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */
+export enum KnownBlobIndexerDataToExtract {
+ /** Indexes just the standard blob properties and user-specified metadata. */
+ StorageMetadata = "storageMetadata",
+ /** Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). */
+ AllMetadata = "allMetadata",
+ /** Extracts all metadata and textual content from each blob. */
+ ContentAndMetadata = "contentAndMetadata",
+}
+
+/**
+ * Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. \
+ * {@link KnownBlobIndexerDataToExtract} can be used interchangeably with BlobIndexerDataToExtract,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **storageMetadata**: Indexes just the standard blob properties and user-specified metadata. \
+ * **allMetadata**: Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). \
+ * **contentAndMetadata**: Extracts all metadata and textual content from each blob.
+ */
+export type BlobIndexerDataToExtract = string;
+
+/** Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. */
+export enum KnownBlobIndexerImageAction {
+ /** Ignores embedded images or image files in the data set. This is the default. */
+ None = "none",
+ /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. */
+ GenerateNormalizedImages = "generateNormalizedImages",
+ /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set.
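+ * In an indexer definition this is typically set through the configuration bag, for example (a sketch; other indexer fields omitted):
+ * `parameters: { configuration: { dataToExtract: "contentAndMetadata", imageAction: "generateNormalizedImagePerPage" } }`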
*/ + GenerateNormalizedImagePerPage = "generateNormalizedImagePerPage", +} + +/** + * Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. \ + * {@link KnownBlobIndexerImageAction} can be used interchangeably with BlobIndexerImageAction, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **none**: Ignores embedded images or image files in the data set. This is the default. \ + * **generateNormalizedImages**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. \ + * **generateNormalizedImagePerPage**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set. + */ +export type BlobIndexerImageAction = string; + +/** Represents the parsing mode for indexing from an Azure blob data source. */ +export enum KnownBlobIndexerParsingMode { + /** Set to default for normal file processing. */ + Default = "default", + /** Set to text to improve indexing performance on plain text files in blob storage. */ + Text = "text", + /** Set to delimitedText when blobs are plain CSV files. */ + DelimitedText = "delimitedText", + /** Set to json to extract structured content from JSON files. */ + Json = "json", + /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */ + JsonArray = "jsonArray", + /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */ + JsonLines = "jsonLines", + /** Set to markdown to extract content from markdown files. */ + Markdown = "markdown", +} + +/** + * Represents the parsing mode for indexing from an Azure blob data source. \ + * {@link KnownBlobIndexerParsingMode} can be used interchangeably with BlobIndexerParsingMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **default**: Set to default for normal file processing. \ + * **text**: Set to text to improve indexing performance on plain text files in blob storage. \ + * **delimitedText**: Set to delimitedText when blobs are plain CSV files. \ + * **json**: Set to json to extract structured content from JSON files. \ + * **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents. \ + * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. \ + * **markdown**: Set to markdown to extract content from markdown files. + */ +export type BlobIndexerParsingMode = string; + +/** Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. 
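+ * In blob indexer configuration this pairs with the markdown parsing mode, for example (a sketch): `configuration: { parsingMode: "markdown", markdownHeaderDepth: "h3" }`.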
*/ +export enum KnownMarkdownHeaderDepth { + /** Indicates that headers up to a level of h1 will be considered while grouping markdown content. */ + H1 = "h1", + /** Indicates that headers up to a level of h2 will be considered while grouping markdown content. */ + H2 = "h2", + /** Indicates that headers up to a level of h3 will be considered while grouping markdown content. */ + H3 = "h3", + /** Indicates that headers up to a level of h4 will be considered while grouping markdown content. */ + H4 = "h4", + /** Indicates that headers up to a level of h5 will be considered while grouping markdown content. */ + H5 = "h5", + /** Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. */ + H6 = "h6", +} + +/** + * Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. \ + * {@link KnownMarkdownHeaderDepth} can be used interchangeably with MarkdownHeaderDepth, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **h1**: Indicates that headers up to a level of h1 will be considered while grouping markdown content. \ + * **h2**: Indicates that headers up to a level of h2 will be considered while grouping markdown content. \ + * **h3**: Indicates that headers up to a level of h3 will be considered while grouping markdown content. \ + * **h4**: Indicates that headers up to a level of h4 will be considered while grouping markdown content. \ + * **h5**: Indicates that headers up to a level of h5 will be considered while grouping markdown content. \ + * **h6**: Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. + */ +export type MarkdownHeaderDepth = string; + +/** Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. */ +export enum KnownMarkdownParsingSubmode { + /** Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. */ + OneToMany = "oneToMany", + /** Indicates that each markdown file will be parsed into a single search document. */ + OneToOne = "oneToOne", +} + +/** + * Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. \ + * {@link KnownMarkdownParsingSubmode} can be used interchangeably with MarkdownParsingSubmode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **oneToMany**: Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. \ + * **oneToOne**: Indicates that each markdown file will be parsed into a single search document. + */ +export type MarkdownParsingSubmode = string; + +/** Determines algorithm for text extraction from PDF files in Azure blob storage. */ +export enum KnownBlobIndexerPDFTextRotationAlgorithm { + /** Leverages normal text extraction. This is the default. */ + None = "none", + /** May produce better and more readable text extraction from PDF files that have rotated text within them. 
 Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply. */
+ DetectAngles = "detectAngles",
+}
+
+/**
+ * Determines algorithm for text extraction from PDF files in Azure blob storage. \
+ * {@link KnownBlobIndexerPDFTextRotationAlgorithm} can be used interchangeably with BlobIndexerPDFTextRotationAlgorithm,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Leverages normal text extraction. This is the default. \
+ * **detectAngles**: May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply.
+ */
+export type BlobIndexerPDFTextRotationAlgorithm = string;
+
+/** Result from listing knowledge sources. */
+export interface _ListKnowledgeSourcesResult {
+ /** The knowledge sources in the service. */
+ value: KnowledgeSourceUnion[];
+}
+
+export function _listKnowledgeSourcesResultDeserializer(item: any): _ListKnowledgeSourcesResult {
+ return {
+ value: knowledgeSourceUnionArrayDeserializer(item["value"]),
+ };
+}
+
+export function knowledgeSourceUnionArraySerializer(result: Array<KnowledgeSourceUnion>): any[] {
+ return result.map((item) => {
+ return knowledgeSourceUnionSerializer(item);
+ });
+}
+
+export function knowledgeSourceUnionArrayDeserializer(result: Array<KnowledgeSourceUnion>): any[] {
+ return result.map((item) => {
+ return knowledgeSourceUnionDeserializer(item);
+ });
+}
+
+/** Response from a get service statistics request. If successful, it includes service level counters and limits. */
+export interface SearchServiceStatistics {
+ /** Service level resource counters. */
+ counters: ServiceCounters;
+ /** Service level general limits. */
+ limits: ServiceLimits;
+ /** Service level indexer runtime consumption. */
+ indexersRuntime?: ServiceIndexersRuntime;
+}
+
+export function searchServiceStatisticsDeserializer(item: any): SearchServiceStatistics {
+ return {
+ counters: serviceCountersDeserializer(item["counters"]),
+ limits: serviceLimitsDeserializer(item["limits"]),
+ indexersRuntime: !item["indexersRuntime"]
+ ? item["indexersRuntime"]
+ : serviceIndexersRuntimeDeserializer(item["indexersRuntime"]),
+ };
+}
+
+/** Represents service-level resource counters and quotas. */
+export interface ServiceCounters {
+ /** Total number of aliases. */
+ aliasCounter: ResourceCounter;
+ /** Total number of documents across all indexes in the service. */
+ documentCounter: ResourceCounter;
+ /** Total number of indexes. */
+ indexCounter: ResourceCounter;
+ /** Total number of indexers. */
+ indexerCounter: ResourceCounter;
+ /** Total number of data sources. */
+ dataSourceCounter: ResourceCounter;
+ /** Total size of used storage in bytes. */
+ storageSizeCounter: ResourceCounter;
+ /** Total number of synonym maps. */
+ synonymMapCounter: ResourceCounter;
+ /** Total number of skillsets. */
+ skillsetCounter: ResourceCounter;
+ /** Total memory consumption of all vector indexes within the service, in bytes.
 */
+ vectorIndexSizeCounter: ResourceCounter;
+}
+
+export function serviceCountersDeserializer(item: any): ServiceCounters {
+ return {
+ aliasCounter: resourceCounterDeserializer(item["aliasesCount"]),
+ documentCounter: resourceCounterDeserializer(item["documentCount"]),
+ indexCounter: resourceCounterDeserializer(item["indexesCount"]),
+ indexerCounter: resourceCounterDeserializer(item["indexersCount"]),
+ dataSourceCounter: resourceCounterDeserializer(item["dataSourcesCount"]),
+ storageSizeCounter: resourceCounterDeserializer(item["storageSize"]),
+ synonymMapCounter: resourceCounterDeserializer(item["synonymMaps"]),
+ skillsetCounter: resourceCounterDeserializer(item["skillsetCount"]),
+ vectorIndexSizeCounter: resourceCounterDeserializer(item["vectorIndexSize"]),
+ };
+}
+
+/** Represents a resource's usage and quota. */
+export interface ResourceCounter {
+ /** The resource usage amount. */
+ usage: number;
+ /** The resource amount quota. */
+ quota?: number;
+}
+
+export function resourceCounterDeserializer(item: any): ResourceCounter {
+ return {
+ usage: item["usage"],
+ quota: item["quota"],
+ };
+}
+
+/** Represents various service level limits. */
+export interface ServiceLimits {
+ /** The maximum allowed fields per index. */
+ maxFieldsPerIndex?: number;
+ /** The maximum depth which you can nest sub-fields in an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. */
+ maxFieldNestingDepthPerIndex?: number;
+ /** The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. */
+ maxComplexCollectionFieldsPerIndex?: number;
+ /** The maximum number of objects in complex collections allowed per document. */
+ maxComplexObjectsInCollectionsPerDocument?: number;
+ /** The maximum amount of storage in bytes allowed per index. */
+ maxStoragePerIndexInBytes?: number;
+ /** The maximum cumulative indexer runtime in seconds allowed for the service. */
+ maxCumulativeIndexerRuntimeSeconds?: number;
+}
+
+export function serviceLimitsDeserializer(item: any): ServiceLimits {
+ return {
+ maxFieldsPerIndex: item["maxFieldsPerIndex"],
+ maxFieldNestingDepthPerIndex: item["maxFieldNestingDepthPerIndex"],
+ maxComplexCollectionFieldsPerIndex: item["maxComplexCollectionFieldsPerIndex"],
+ maxComplexObjectsInCollectionsPerDocument: item["maxComplexObjectsInCollectionsPerDocument"],
+ maxStoragePerIndexInBytes: item["maxStoragePerIndex"],
+ maxCumulativeIndexerRuntimeSeconds: item["maxCumulativeIndexerRuntimeSeconds"],
+ };
+}
+
+/** Response from a request to retrieve stats summary of all indexes. If successful, it includes the stats of each index in the service. */
+export interface _ListIndexStatsSummary {
+ /** The Statistics summary of all indexes in the Search service. */
+ readonly indexesStatistics: IndexStatisticsSummary[];
+}
+
+export function _listIndexStatsSummaryDeserializer(item: any): _ListIndexStatsSummary {
+ return {
+ indexesStatistics: indexStatisticsSummaryArrayDeserializer(item["value"]),
+ };
+}
+
+export function indexStatisticsSummaryArrayDeserializer(
+ result: Array<IndexStatisticsSummary>,
+): any[] {
+ return result.map((item) => {
+ return indexStatisticsSummaryDeserializer(item);
+ });
+}
+
+/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */
+export interface IndexStatisticsSummary {
+ /** The name of the index. */
+ readonly name: string;
+ /** The number of documents in the index.
*/ + readonly documentCount: number; + /** The amount of storage in bytes consumed by the index. */ + readonly storageSize: number; + /** The amount of memory in bytes consumed by vectors in the index. */ + readonly vectorIndexSize?: number; +} + +export function indexStatisticsSummaryDeserializer(item: any): IndexStatisticsSummary { + return { + name: item["name"], + documentCount: item["documentCount"], + storageSize: item["storageSize"], + vectorIndexSize: item["vectorIndexSize"], + }; +} + +/** Represents a datasource definition, which can be used to configure an indexer. */ +export interface SearchIndexerDataSourceConnection { + /** The name of the datasource. */ + name: string; + /** The description of the datasource. */ + description?: string; + /** The type of the datasource. */ + type: SearchIndexerDataSourceType; + /** A specific type of the data source, in case the resource is capable of different modalities. For example, 'MongoDb' for certain 'cosmosDb' accounts. */ + subType?: string; + /** Credentials for the datasource. */ + credentials: DataSourceCredentials; + /** The data container for the datasource. */ + container: SearchIndexerDataContainer; + /** An explicit managed identity to use for this datasource. If not specified and the connection string is a managed identity, the system-assigned managed identity is used. If not specified, the value remains unchanged. If "none" is specified, the value of this property is cleared. */ + identity?: SearchIndexerDataIdentityUnion; + /** Ingestion options with various types of permission data. */ + indexerPermissionOptions?: IndexerPermissionOption[]; + /** The data change detection policy for the datasource. */ + dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion; + /** The data deletion detection policy for the datasource. */ + dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion; + /** The ETag of the data source. */ + eTag?: string; + /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even Microsoft, can decrypt your data source definition. Once you have encrypted your data source definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your datasource definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ + encryptionKey?: SearchResourceEncryptionKey; +} + +export function searchIndexerDataSourceConnectionSerializer( + item: SearchIndexerDataSourceConnection, +): any { + return { + name: item["name"], + description: item["description"], + type: item["type"], + subType: item["subType"], + credentials: dataSourceCredentialsSerializer(item["credentials"]), + container: searchIndexerDataContainerSerializer(item["container"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + indexerPermissionOptions: !item["indexerPermissionOptions"] + ? item["indexerPermissionOptions"] + : item["indexerPermissionOptions"].map((p: any) => { + return p; + }), + dataChangeDetectionPolicy: !item["dataChangeDetectionPolicy"] + ? 
item["dataChangeDetectionPolicy"] + : dataChangeDetectionPolicyUnionSerializer(item["dataChangeDetectionPolicy"]), + dataDeletionDetectionPolicy: !item["dataDeletionDetectionPolicy"] + ? item["dataDeletionDetectionPolicy"] + : dataDeletionDetectionPolicyUnionSerializer(item["dataDeletionDetectionPolicy"]), + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + }; +} + +export function searchIndexerDataSourceConnectionDeserializer( + item: any, +): SearchIndexerDataSourceConnection { + return { + name: item["name"], + description: item["description"], + type: item["type"], + subType: item["subType"], + credentials: dataSourceCredentialsDeserializer(item["credentials"]), + container: searchIndexerDataContainerDeserializer(item["container"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + indexerPermissionOptions: !item["indexerPermissionOptions"] + ? item["indexerPermissionOptions"] + : item["indexerPermissionOptions"].map((p: any) => { + return p; + }), + dataChangeDetectionPolicy: !item["dataChangeDetectionPolicy"] + ? item["dataChangeDetectionPolicy"] + : dataChangeDetectionPolicyUnionDeserializer(item["dataChangeDetectionPolicy"]), + dataDeletionDetectionPolicy: !item["dataDeletionDetectionPolicy"] + ? item["dataDeletionDetectionPolicy"] + : dataDeletionDetectionPolicyUnionDeserializer(item["dataDeletionDetectionPolicy"]), + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + }; +} + +/** Defines the type of a datasource. */ +export enum KnownSearchIndexerDataSourceType { + /** Indicates an Azure SQL datasource. */ + AzureSql = "azuresql", + /** Indicates a CosmosDB datasource. */ + CosmosDb = "cosmosdb", + /** Indicates an Azure Blob datasource. */ + AzureBlob = "azureblob", + /** Indicates an Azure Table datasource. */ + AzureTable = "azuretable", + /** Indicates a MySql datasource. */ + MySql = "mysql", + /** Indicates an ADLS Gen2 datasource. */ + AdlsGen2 = "adlsgen2", + /** Indicates a Microsoft Fabric OneLake datasource. */ + OneLake = "onelake", +} + +/** + * Defines the type of a datasource. \ + * {@link KnownSearchIndexerDataSourceType} can be used interchangeably with SearchIndexerDataSourceType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **azuresql**: Indicates an Azure SQL datasource. \ + * **cosmosdb**: Indicates a CosmosDB datasource. \ + * **azureblob**: Indicates an Azure Blob datasource. \ + * **azuretable**: Indicates an Azure Table datasource. \ + * **mysql**: Indicates a MySql datasource. \ + * **adlsgen2**: Indicates an ADLS Gen2 datasource. \ + * **onelake**: Indicates a Microsoft Fabric OneLake datasource. + */ +export type SearchIndexerDataSourceType = string; + +/** Represents credentials that can be used to connect to a datasource. */ +export interface DataSourceCredentials { + /** The connection string for the datasource. Set to `` (with brackets) if you don't want the connection string updated. Set to `` if you want to remove the connection string value from the datasource. 
*/ + connectionString?: string; +} + +export function dataSourceCredentialsSerializer(item: DataSourceCredentials): any { + return { connectionString: item["connectionString"] }; +} + +export function dataSourceCredentialsDeserializer(item: any): DataSourceCredentials { + return { + connectionString: item["connectionString"], + }; +} + +/** Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. */ +export interface SearchIndexerDataContainer { + /** The name of the table or view (for Azure SQL data source) or collection (for CosmosDB data source) that will be indexed. */ + name: string; + /** A query that is applied to this data container. The syntax and meaning of this parameter is datasource-specific. Not supported by Azure SQL datasources. */ + query?: string; +} + +export function searchIndexerDataContainerSerializer(item: SearchIndexerDataContainer): any { + return { name: item["name"], query: item["query"] }; +} + +export function searchIndexerDataContainerDeserializer(item: any): SearchIndexerDataContainer { + return { + name: item["name"], + query: item["query"], + }; +} + +/** Options with various types of permission data to index. */ +export enum KnownIndexerPermissionOption { + /** Indexer to ingest ACL userIds from data source to index. */ + UserIds = "userIds", + /** Indexer to ingest ACL groupIds from data source to index. */ + GroupIds = "groupIds", + /** Indexer to ingest Azure RBAC scope from data source to index. */ + RbacScope = "rbacScope", +} + +/** + * Options with various types of permission data to index. \ + * {@link KnownIndexerPermissionOption} can be used interchangeably with IndexerPermissionOption, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **userIds**: Indexer to ingest ACL userIds from data source to index. \ + * **groupIds**: Indexer to ingest ACL groupIds from data source to index. \ + * **rbacScope**: Indexer to ingest Azure RBAC scope from data source to index. + */ +export type IndexerPermissionOption = string; + +/** Base type for data change detection policies. */ +export interface DataChangeDetectionPolicy { + /** The discriminator for derived types. 
*/
+  /** The discriminator possible values: #Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy, #Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy */
+  odatatype: string;
+}
+
+export function dataChangeDetectionPolicySerializer(item: DataChangeDetectionPolicy): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function dataChangeDetectionPolicyDeserializer(item: any): DataChangeDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
+
+/** Alias for DataChangeDetectionPolicyUnion */
+export type DataChangeDetectionPolicyUnion =
+  | HighWaterMarkChangeDetectionPolicy
+  | SqlIntegratedChangeTrackingPolicy
+  | DataChangeDetectionPolicy;
+
+export function dataChangeDetectionPolicyUnionSerializer(
+  item: DataChangeDetectionPolicyUnion,
+): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy":
+      return highWaterMarkChangeDetectionPolicySerializer(
+        item as HighWaterMarkChangeDetectionPolicy,
+      );
+
+    case "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy":
+      return sqlIntegratedChangeTrackingPolicySerializer(item as SqlIntegratedChangeTrackingPolicy);
+
+    default:
+      return dataChangeDetectionPolicySerializer(item);
+  }
+}
+
+export function dataChangeDetectionPolicyUnionDeserializer(
+  item: any,
+): DataChangeDetectionPolicyUnion {
+  // Raw payloads carry the discriminator under its wire name, "@odata.type".
+  switch (item["@odata.type"]) {
+    case "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy":
+      return highWaterMarkChangeDetectionPolicyDeserializer(
+        item as HighWaterMarkChangeDetectionPolicy,
+      );
+
+    case "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy":
+      return sqlIntegratedChangeTrackingPolicyDeserializer(
+        item as SqlIntegratedChangeTrackingPolicy,
+      );
+
+    default:
+      return dataChangeDetectionPolicyDeserializer(item);
+  }
+}
+
+/** Defines a data change detection policy that captures changes based on the value of a high water mark column. */
+export interface HighWaterMarkChangeDetectionPolicy extends DataChangeDetectionPolicy {
+  /** The name of the high water mark column. */
+  highWaterMarkColumnName: string;
+  /** A URI fragment specifying the type of data change detection policy. */
+  odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy";
+}
+
+export function highWaterMarkChangeDetectionPolicySerializer(
+  item: HighWaterMarkChangeDetectionPolicy,
+): any {
+  return {
+    "@odata.type": item["odatatype"],
+    highWaterMarkColumnName: item["highWaterMarkColumnName"],
+  };
+}
+
+export function highWaterMarkChangeDetectionPolicyDeserializer(
+  item: any,
+): HighWaterMarkChangeDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+    highWaterMarkColumnName: item["highWaterMarkColumnName"],
+  };
+}
+
+/** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */
+export interface SqlIntegratedChangeTrackingPolicy extends DataChangeDetectionPolicy {
+  /** A URI fragment specifying the type of data change detection policy. */
+  odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy";
+}
+
+export function sqlIntegratedChangeTrackingPolicySerializer(
+  item: SqlIntegratedChangeTrackingPolicy,
+): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function sqlIntegratedChangeTrackingPolicyDeserializer(
+  item: any,
+): SqlIntegratedChangeTrackingPolicy {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
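+// Editor's sketch: round-tripping a change detection policy through the union
+// serializer/deserializer above, showing how the "@odata.type" discriminator drives
+// dispatch. The "_ts" column name is illustrative.
+function exampleChangeDetectionRoundTrip(): DataChangeDetectionPolicyUnion {
+  const policy: HighWaterMarkChangeDetectionPolicy = {
+    odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
+    highWaterMarkColumnName: "_ts",
+  };
+  const wire = dataChangeDetectionPolicyUnionSerializer(policy);
+  return dataChangeDetectionPolicyUnionDeserializer(wire);
+}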
+/** Base type for data deletion detection policies. */
+export interface DataDeletionDetectionPolicy {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy, #Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy */
+  odatatype: string;
+}
+
+export function dataDeletionDetectionPolicySerializer(item: DataDeletionDetectionPolicy): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function dataDeletionDetectionPolicyDeserializer(item: any): DataDeletionDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
+
+/** Alias for DataDeletionDetectionPolicyUnion */
+export type DataDeletionDetectionPolicyUnion =
+  | SoftDeleteColumnDeletionDetectionPolicy
+  | NativeBlobSoftDeleteDeletionDetectionPolicy
+  | DataDeletionDetectionPolicy;
+
+export function dataDeletionDetectionPolicyUnionSerializer(
+  item: DataDeletionDetectionPolicyUnion,
+): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy":
+      return softDeleteColumnDeletionDetectionPolicySerializer(
+        item as SoftDeleteColumnDeletionDetectionPolicy,
+      );
+
+    case "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy":
+      return nativeBlobSoftDeleteDeletionDetectionPolicySerializer(
+        item as NativeBlobSoftDeleteDeletionDetectionPolicy,
+      );
+
+    default:
+      return dataDeletionDetectionPolicySerializer(item);
+  }
+}
+
+export function dataDeletionDetectionPolicyUnionDeserializer(
+  item: any,
+): DataDeletionDetectionPolicyUnion {
+  // Raw payloads carry the discriminator under its wire name, "@odata.type".
+  switch (item["@odata.type"]) {
+    case "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy":
+      return softDeleteColumnDeletionDetectionPolicyDeserializer(
+        item as SoftDeleteColumnDeletionDetectionPolicy,
+      );
+
+    case "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy":
+      return nativeBlobSoftDeleteDeletionDetectionPolicyDeserializer(
+        item as NativeBlobSoftDeleteDeletionDetectionPolicy,
+      );
+
+    default:
+      return dataDeletionDetectionPolicyDeserializer(item);
+  }
+}
+
+/** Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. */
+export interface SoftDeleteColumnDeletionDetectionPolicy extends DataDeletionDetectionPolicy {
+  /** The name of the column to use for soft-deletion detection. */
+  softDeleteColumnName?: string;
+  /** The marker value that identifies an item as deleted. */
+  softDeleteMarkerValue?: string;
+  /** A URI fragment specifying the type of data deletion detection policy. */
+  odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
+}
+
+export function softDeleteColumnDeletionDetectionPolicySerializer(
+  item: SoftDeleteColumnDeletionDetectionPolicy,
+): any {
+  return {
+    "@odata.type": item["odatatype"],
+    softDeleteColumnName: item["softDeleteColumnName"],
+    softDeleteMarkerValue: item["softDeleteMarkerValue"],
+  };
+}
+
+export function softDeleteColumnDeletionDetectionPolicyDeserializer(
+  item: any,
+): SoftDeleteColumnDeletionDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+    softDeleteColumnName: item["softDeleteColumnName"],
+    softDeleteMarkerValue: item["softDeleteMarkerValue"],
+  };
+}
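+// Editor's sketch: a soft-delete policy as it might be authored in a datasource
+// definition; the column name and marker value are illustrative placeholders.
+const exampleSoftDeletePolicy: SoftDeleteColumnDeletionDetectionPolicy = {
+  odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
+  softDeleteColumnName: "isDeleted",
+  softDeleteMarkerValue: "true",
+};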
+/** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */
+export interface NativeBlobSoftDeleteDeletionDetectionPolicy extends DataDeletionDetectionPolicy {
+  /** A URI fragment specifying the type of data deletion detection policy. */
+  odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
+}
+
+export function nativeBlobSoftDeleteDeletionDetectionPolicySerializer(
+  item: NativeBlobSoftDeleteDeletionDetectionPolicy,
+): any {
+  return { "@odata.type": item["odatatype"] };
+}
+
+export function nativeBlobSoftDeleteDeletionDetectionPolicyDeserializer(
+  item: any,
+): NativeBlobSoftDeleteDeletionDetectionPolicy {
+  return {
+    odatatype: item["@odata.type"],
+  };
+}
+
+/** Response from a List Datasources request. If successful, it includes the full definitions of all datasources. */
+export interface ListDataSourcesResult {
+  /** The datasources in the Search service. */
+  dataSources: SearchIndexerDataSourceConnection[];
+}
+
+export function listDataSourcesResultDeserializer(item: any): ListDataSourcesResult {
+  return {
+    dataSources: searchIndexerDataSourceConnectionArrayDeserializer(item["value"]),
+  };
+}
+
+export function searchIndexerDataSourceConnectionArraySerializer(
+  result: Array<SearchIndexerDataSourceConnection>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerDataSourceConnectionSerializer(item);
+  });
+}
+
+export function searchIndexerDataSourceConnectionArrayDeserializer(
+  result: Array<SearchIndexerDataSourceConnection>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerDataSourceConnectionDeserializer(item);
+  });
+}
+
+/** The set of document keys or datasource document identifiers to reset. */
+export interface DocumentKeysOrIds {
+  /** Document keys to be reset. */
+  documentKeys?: string[];
+  /** Datasource document identifiers to be reset. */
+  datasourceDocumentIds?: string[];
+}
+
+export function documentKeysOrIdsSerializer(item: DocumentKeysOrIds): any {
+  return {
+    documentKeys: !item["documentKeys"]
+      ? item["documentKeys"]
+      : item["documentKeys"].map((p: any) => {
+          return p;
+        }),
+    datasourceDocumentIds: !item["datasourceDocumentIds"]
+      ? item["datasourceDocumentIds"]
+      : item["datasourceDocumentIds"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+/** Represents an indexer. */
+export interface SearchIndexer {
+  /** The name of the indexer. */
+  name: string;
+  /** The description of the indexer. */
+  description?: string;
+  /** The name of the datasource from which this indexer reads data. */
+  dataSourceName: string;
+  /** The name of the skillset executing with this indexer. */
+  skillsetName?: string;
+  /** The name of the index to which this indexer writes data. */
+  targetIndexName: string;
+  /** The schedule for this indexer. */
+  schedule?: IndexingSchedule;
+  /** Parameters for indexer execution. */
+  parameters?: IndexingParameters;
+  /** Defines mappings between fields in the data source and corresponding target fields in the index. */
+  fieldMappings?: FieldMapping[];
+  /** Output field mappings are applied after enrichment and immediately before indexing. */
+  outputFieldMappings?: FieldMapping[];
+  /** A value indicating whether the indexer is disabled. Default is false. */
+  isDisabled?: boolean;
+  /** The ETag of the indexer. */
+  eTag?: string;
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your indexer definition, it will always remain encrypted.
The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ + encryptionKey?: SearchResourceEncryptionKey; + /** Adds caching to an enrichment pipeline to allow for incremental modification steps without having to rebuild the index every time. */ + cache?: SearchIndexerCache; +} + +export function searchIndexerSerializer(item: SearchIndexer): any { + return { + name: item["name"], + description: item["description"], + dataSourceName: item["dataSourceName"], + skillsetName: item["skillsetName"], + targetIndexName: item["targetIndexName"], + schedule: !item["schedule"] ? item["schedule"] : indexingScheduleSerializer(item["schedule"]), + parameters: !item["parameters"] + ? item["parameters"] + : indexingParametersSerializer(item["parameters"]), + fieldMappings: !item["fieldMappings"] + ? item["fieldMappings"] + : fieldMappingArraySerializer(item["fieldMappings"]), + outputFieldMappings: !item["outputFieldMappings"] + ? item["outputFieldMappings"] + : fieldMappingArraySerializer(item["outputFieldMappings"]), + disabled: item["isDisabled"], + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + cache: !item["cache"] ? item["cache"] : searchIndexerCacheSerializer(item["cache"]), + }; +} + +export function searchIndexerDeserializer(item: any): SearchIndexer { + return { + name: item["name"], + description: item["description"], + dataSourceName: item["dataSourceName"], + skillsetName: item["skillsetName"], + targetIndexName: item["targetIndexName"], + schedule: !item["schedule"] ? item["schedule"] : indexingScheduleDeserializer(item["schedule"]), + parameters: !item["parameters"] + ? item["parameters"] + : indexingParametersDeserializer(item["parameters"]), + fieldMappings: !item["fieldMappings"] + ? item["fieldMappings"] + : fieldMappingArrayDeserializer(item["fieldMappings"]), + outputFieldMappings: !item["outputFieldMappings"] + ? item["outputFieldMappings"] + : fieldMappingArrayDeserializer(item["outputFieldMappings"]), + isDisabled: item["disabled"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + cache: !item["cache"] ? item["cache"] : searchIndexerCacheDeserializer(item["cache"]), + }; +} + +/** Represents parameters for indexer execution. */ +export interface IndexingParameters { + /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */ + batchSize?: number; + /** The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. */ + maxFailedItems?: number; + /** The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. */ + maxFailedItemsPerBatch?: number; + /** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. 
*/ + configuration?: IndexingParametersConfiguration; +} + +export function indexingParametersSerializer(item: IndexingParameters): any { + return { + batchSize: item["batchSize"], + maxFailedItems: item["maxFailedItems"], + maxFailedItemsPerBatch: item["maxFailedItemsPerBatch"], + configuration: !item["configuration"] + ? item["configuration"] + : indexingParametersConfigurationSerializer(item["configuration"]), + }; +} + +export function indexingParametersDeserializer(item: any): IndexingParameters { + return { + batchSize: item["batchSize"], + maxFailedItems: item["maxFailedItems"], + maxFailedItemsPerBatch: item["maxFailedItemsPerBatch"], + configuration: !item["configuration"] + ? item["configuration"] + : indexingParametersConfigurationDeserializer(item["configuration"]), + }; +} + +/** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ +export interface IndexingParametersConfiguration { + /** Represents the parsing mode for indexing from an Azure blob data source. */ + parsingMode?: BlobIndexerParsingMode; + /** Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over those files during indexing. */ + excludedFileNameExtensions?: string; + /** Comma-delimited list of filename extensions to select when processing from Azure blob storage. For example, you could focus indexing on specific application files ".docx, .pptx, .msg" to specifically include those file types. */ + indexedFileNameExtensions?: string; + /** For Azure blobs, set to false if you want to continue indexing when an unsupported content type is encountered, and you don't know all the content types (file extensions) in advance. */ + failOnUnsupportedContentType?: boolean; + /** For Azure blobs, set to false if you want to continue indexing if a document fails indexing. */ + failOnUnprocessableDocument?: boolean; + /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. */ + indexStorageMetadataOnlyForOversizedDocuments?: boolean; + /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */ + delimitedTextHeaders?: string; + /** For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, "|"). */ + delimitedTextDelimiter?: string; + /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */ + firstLineContainsHeaders?: boolean; + /** Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. */ + markdownParsingSubmode?: MarkdownParsingSubmode; + /** Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. */ + markdownHeaderDepth?: MarkdownHeaderDepth; + /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. 
*/
+  documentRoot?: string;
+  /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */
+  dataToExtract?: BlobIndexerDataToExtract;
+  /** Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. */
+  imageAction?: BlobIndexerImageAction;
+  /** If true, will create a path //document//file_data that is an object representing the original file data downloaded from your blob data source. This allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. */
+  allowSkillsetToReadFileData?: boolean;
+  /** Determines algorithm for text extraction from PDF files in Azure blob storage. */
+  pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm;
+  /** Specifies the environment in which the indexer should execute. */
+  executionEnvironment?: IndexerExecutionEnvironment;
+  /** Increases the timeout beyond the 5-minute default for Azure SQL database data sources, specified in the format "hh:mm:ss". */
+  queryTimeout?: string;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function indexingParametersConfigurationSerializer(
+  item: IndexingParametersConfiguration,
+): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    parsingMode: item["parsingMode"],
+    excludedFileNameExtensions: item["excludedFileNameExtensions"],
+    indexedFileNameExtensions: item["indexedFileNameExtensions"],
+    failOnUnsupportedContentType: item["failOnUnsupportedContentType"],
+    failOnUnprocessableDocument: item["failOnUnprocessableDocument"],
+    indexStorageMetadataOnlyForOversizedDocuments:
+      item["indexStorageMetadataOnlyForOversizedDocuments"],
+    delimitedTextHeaders: item["delimitedTextHeaders"],
+    delimitedTextDelimiter: item["delimitedTextDelimiter"],
+    firstLineContainsHeaders: item["firstLineContainsHeaders"],
+    markdownParsingSubmode: item["markdownParsingSubmode"],
+    markdownHeaderDepth: item["markdownHeaderDepth"],
+    documentRoot: item["documentRoot"],
+    dataToExtract: item["dataToExtract"],
+    imageAction: item["imageAction"],
+    allowSkillsetToReadFileData: item["allowSkillsetToReadFileData"],
+    pdfTextRotationAlgorithm: item["pdfTextRotationAlgorithm"],
+    executionEnvironment: item["executionEnvironment"],
+    queryTimeout: item["queryTimeout"],
+  };
+}
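+// Editor's sketch: how additionalProperties is flattened onto the wire object by the
+// serializer above. "customSetting" is a hypothetical pass-through key, and the
+// "json" parsing mode assumes BlobIndexerParsingMode is an extensible string type.
+function exampleSerializeConfiguration(): any {
+  const config: IndexingParametersConfiguration = {
+    parsingMode: "json",
+    additionalProperties: { customSetting: true },
+  };
+  return indexingParametersConfigurationSerializer(config);
+}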
+export function indexingParametersConfigurationDeserializer(
+  item: any,
+): IndexingParametersConfiguration {
+  return {
+    additionalProperties: serializeRecord(item, [
+      "parsingMode",
+      "excludedFileNameExtensions",
+      "indexedFileNameExtensions",
+      "failOnUnsupportedContentType",
+      "failOnUnprocessableDocument",
+      "indexStorageMetadataOnlyForOversizedDocuments",
+      "delimitedTextHeaders",
+      "delimitedTextDelimiter",
+      "firstLineContainsHeaders",
+      "markdownParsingSubmode",
+      "markdownHeaderDepth",
+      "documentRoot",
+      "dataToExtract",
+      "imageAction",
+      "allowSkillsetToReadFileData",
+      "pdfTextRotationAlgorithm",
+      "executionEnvironment",
+      "queryTimeout",
+    ]),
+    parsingMode: item["parsingMode"],
+    excludedFileNameExtensions: item["excludedFileNameExtensions"],
+    indexedFileNameExtensions: item["indexedFileNameExtensions"],
+    failOnUnsupportedContentType: item["failOnUnsupportedContentType"],
+    failOnUnprocessableDocument: item["failOnUnprocessableDocument"],
+    indexStorageMetadataOnlyForOversizedDocuments:
+      item["indexStorageMetadataOnlyForOversizedDocuments"],
+    delimitedTextHeaders: item["delimitedTextHeaders"],
+    delimitedTextDelimiter: item["delimitedTextDelimiter"],
+    firstLineContainsHeaders: item["firstLineContainsHeaders"],
+    markdownParsingSubmode: item["markdownParsingSubmode"],
+    markdownHeaderDepth: item["markdownHeaderDepth"],
+    documentRoot: item["documentRoot"],
+    dataToExtract: item["dataToExtract"],
+    imageAction: item["imageAction"],
+    allowSkillsetToReadFileData: item["allowSkillsetToReadFileData"],
+    pdfTextRotationAlgorithm: item["pdfTextRotationAlgorithm"],
+    executionEnvironment: item["executionEnvironment"],
+    queryTimeout: item["queryTimeout"],
+  };
+}
+
+/** Specifies the environment in which the indexer should execute. */
+export enum KnownIndexerExecutionEnvironment {
+  /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */
+  Standard = "standard",
+  /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
+  Private = "private",
+}
+
+/**
+ * Specifies the environment in which the indexer should execute. \
+ * {@link KnownIndexerExecutionEnvironment} can be used interchangeably with IndexerExecutionEnvironment,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **standard**: Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. \
+ * **private**: Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources.
+ */
+export type IndexerExecutionEnvironment = string;
+
+export function fieldMappingArraySerializer(result: Array<FieldMapping>): any[] {
+  return result.map((item) => {
+    return fieldMappingSerializer(item);
+  });
+}
+
+export function fieldMappingArrayDeserializer(result: Array<FieldMapping>): any[] {
+  return result.map((item) => {
+    return fieldMappingDeserializer(item);
+  });
+}
+
+/** Defines a mapping between a field in a data source and a target field in an index. */
+export interface FieldMapping {
+  /** The name of the field in the data source. */
+  sourceFieldName: string;
+  /** The name of the target field in the index. Same as the source field name by default. */
+  targetFieldName?: string;
+  /** A function to apply to each source field value before indexing. */
+  mappingFunction?: FieldMappingFunction;
+}
+
+export function fieldMappingSerializer(item: FieldMapping): any {
+  return {
+    sourceFieldName: item["sourceFieldName"],
+    targetFieldName: item["targetFieldName"],
+    mappingFunction: !item["mappingFunction"]
+      ? item["mappingFunction"]
+      : fieldMappingFunctionSerializer(item["mappingFunction"]),
+  };
+}
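+// Editor's sketch: a field mapping that renames a source field and applies a mapping
+// function. "base64Encode" follows the documented Azure Search mapping-function naming,
+// but the overall mapping is illustrative.
+const exampleFieldMapping: FieldMapping = {
+  sourceFieldName: "metadata_storage_path",
+  targetFieldName: "id",
+  mappingFunction: { name: "base64Encode" },
+};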
item["mappingFunction"] + : fieldMappingFunctionSerializer(item["mappingFunction"]), + }; +} + +export function fieldMappingDeserializer(item: any): FieldMapping { + return { + sourceFieldName: item["sourceFieldName"], + targetFieldName: item["targetFieldName"], + mappingFunction: !item["mappingFunction"] + ? item["mappingFunction"] + : fieldMappingFunctionDeserializer(item["mappingFunction"]), + }; +} + +/** Represents a function that transforms a value from a data source before indexing. */ +export interface FieldMappingFunction { + /** The name of the field mapping function. */ + name: string; + /** A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. */ + parameters?: Record; +} + +export function fieldMappingFunctionSerializer(item: FieldMappingFunction): any { + return { name: item["name"], parameters: item["parameters"] }; +} + +export function fieldMappingFunctionDeserializer(item: any): FieldMappingFunction { + return { + name: item["name"], + parameters: item["parameters"], + }; +} + +/** The type of the cache. */ +export interface SearchIndexerCache { + /** The connection string to the storage account where the cache data will be persisted. */ + storageConnectionString?: string; + /** Specifies whether incremental reprocessing is enabled. */ + enableReprocessing?: boolean; + /** The user-assigned managed identity used for connections to the enrichment cache. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + identity?: SearchIndexerDataIdentityUnion; + /** A guid for the SearchIndexerCache. */ + id?: string; +} + +export function searchIndexerCacheSerializer(item: SearchIndexerCache): any { + return { + storageConnectionString: item["storageConnectionString"], + enableReprocessing: item["enableReprocessing"], + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + id: item["id"], + }; +} + +export function searchIndexerCacheDeserializer(item: any): SearchIndexerCache { + return { + storageConnectionString: item["storageConnectionString"], + enableReprocessing: item["enableReprocessing"], + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + id: item["id"], + }; +} + +/** Response from a List Indexers request. If successful, it includes the full definitions of all indexers. */ +export interface ListIndexersResult { + /** The indexers in the Search service. */ + indexers: SearchIndexer[]; +} + +export function listIndexersResultDeserializer(item: any): ListIndexersResult { + return { + indexers: searchIndexerArrayDeserializer(item["value"]), + }; +} + +export function searchIndexerArraySerializer(result: Array): any[] { + return result.map((item) => { + return searchIndexerSerializer(item); + }); +} + +export function searchIndexerArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return searchIndexerDeserializer(item); + }); +} + +/** Represents the current status and execution history of an indexer. */ +export interface SearchIndexerStatus { + /** The name of the indexer. */ + name: string; + /** Overall indexer status. */ + status: IndexerStatus; + /** The indexer's cumulative runtime consumption in the service. 
*/ + runtime?: IndexerRuntime; + /** The result of the most recent or an in-progress indexer execution. */ + lastResult?: IndexerExecutionResult; + /** History of the recent indexer executions, sorted in reverse chronological order. */ + executionHistory: IndexerExecutionResult[]; + /** The execution limits for the indexer. */ + limits: SearchIndexerLimits; + /** All of the state that defines and dictates the indexer's current execution. */ + currentState?: IndexerCurrentState; +} + +export function searchIndexerStatusDeserializer(item: any): SearchIndexerStatus { + return { + name: item["name"], + status: item["status"], + runtime: !item["runtime"] ? item["runtime"] : indexerRuntimeDeserializer(item["runtime"]), + lastResult: !item["lastResult"] + ? item["lastResult"] + : indexerExecutionResultDeserializer(item["lastResult"]), + executionHistory: indexerExecutionResultArrayDeserializer(item["executionHistory"]), + limits: searchIndexerLimitsDeserializer(item["limits"]), + currentState: !item["currentState"] + ? item["currentState"] + : indexerCurrentStateDeserializer(item["currentState"]), + }; +} + +/** Represents the overall indexer status. */ +export enum KnownIndexerStatus { + /** Indicates that the indexer is in an unknown state. */ + Unknown = "unknown", + /** Indicates that the indexer experienced an error that cannot be corrected without human intervention. */ + Error = "error", + /** Indicates that the indexer is running normally. */ + Running = "running", +} + +/** + * Represents the overall indexer status. \ + * {@link KnownIndexerStatus} can be used interchangeably with IndexerStatus, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **unknown**: Indicates that the indexer is in an unknown state. \ + * **error**: Indicates that the indexer experienced an error that cannot be corrected without human intervention. \ + * **running**: Indicates that the indexer is running normally. + */ +export type IndexerStatus = string; + +/** Represents the result of an individual indexer execution. */ +export interface IndexerExecutionResult { + /** The outcome of this indexer execution. */ + status: IndexerExecutionStatus; + /** The outcome of this indexer execution. */ + readonly statusDetail?: IndexerExecutionStatusDetail; + /** The mode the indexer is running in. */ + readonly mode?: IndexingMode; + /** All of the state that defines and dictates the indexer's current execution. */ + readonly currentState?: IndexerCurrentState; + /** The error message indicating the top-level error, if any. */ + errorMessage?: string; + /** The start time of this indexer execution. */ + startTime?: Date; + /** The end time of this indexer execution, if the execution has already completed. */ + endTime?: Date; + /** The item-level indexing errors. */ + errors: SearchIndexerError[]; + /** The item-level indexing warnings. */ + warnings: SearchIndexerWarning[]; + /** The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed. */ + itemCount: number; + /** The number of items that failed to be indexed during this indexer execution. */ + failedItemCount: number; + /** Change tracking state with which an indexer execution started. */ + initialTrackingState?: string; + /** Change tracking state with which an indexer execution finished. 
*/ + finalTrackingState?: string; +} + +export function indexerExecutionResultDeserializer(item: any): IndexerExecutionResult { + return { + status: item["status"], + statusDetail: item["statusDetail"], + mode: item["mode"], + currentState: !item["currentState"] + ? item["currentState"] + : indexerCurrentStateDeserializer(item["currentState"]), + errorMessage: item["errorMessage"], + startTime: !item["startTime"] ? item["startTime"] : new Date(item["startTime"]), + endTime: !item["endTime"] ? item["endTime"] : new Date(item["endTime"]), + errors: searchIndexerErrorArrayDeserializer(item["errors"]), + warnings: searchIndexerWarningArrayDeserializer(item["warnings"]), + itemCount: item["itemsProcessed"], + failedItemCount: item["itemsFailed"], + initialTrackingState: item["initialTrackingState"], + finalTrackingState: item["finalTrackingState"], + }; +} + +/** Represents the status of an individual indexer execution. */ +export enum KnownIndexerExecutionStatus { + /** An indexer invocation has failed, but the failure may be transient. Indexer invocations will continue per schedule. */ + TransientFailure = "transientFailure", + /** Indexer execution completed successfully. */ + Success = "success", + /** Indexer execution is in progress. */ + InProgress = "inProgress", + /** Indexer has been reset. */ + Reset = "reset", +} + +/** + * Represents the status of an individual indexer execution. \ + * {@link KnownIndexerExecutionStatus} can be used interchangeably with IndexerExecutionStatus, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **transientFailure**: An indexer invocation has failed, but the failure may be transient. Indexer invocations will continue per schedule. \ + * **success**: Indexer execution completed successfully. \ + * **inProgress**: Indexer execution is in progress. \ + * **reset**: Indexer has been reset. + */ +export type IndexerExecutionStatus = string; + +/** Details the status of an individual indexer execution. */ +export enum KnownIndexerExecutionStatusDetail { + /** Indicates that the reset that occurred was for a call to ResetDocs. */ + ResetDocs = "resetDocs", + /** Indicates to selectively resync based on option(s) from data source. */ + Resync = "resync", +} + +/** + * Details the status of an individual indexer execution. \ + * {@link KnownIndexerExecutionStatusDetail} can be used interchangeably with IndexerExecutionStatusDetail, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **resetDocs**: Indicates that the reset that occurred was for a call to ResetDocs. \ + * **resync**: Indicates to selectively resync based on option(s) from data source. + */ +export type IndexerExecutionStatusDetail = string; + +/** Represents the mode the indexer is executing in. */ +export enum KnownIndexingMode { + /** The indexer is indexing all documents in the datasource. */ + IndexingAllDocs = "indexingAllDocs", + /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */ + IndexingResetDocs = "indexingResetDocs", + /** The indexer is resyncing and indexing selective option(s) from the datasource. */ + IndexingResync = "indexingResync", +} + +/** + * Represents the mode the indexer is executing in. \ + * {@link KnownIndexingMode} can be used interchangeably with IndexingMode, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service
+ * **indexingAllDocs**: The indexer is indexing all documents in the datasource. \
+ * **indexingResetDocs**: The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. \
+ * **indexingResync**: The indexer is resyncing and indexing selective option(s) from the datasource.
+ */
+export type IndexingMode = string;
+
+/** Represents all of the state that defines and dictates the indexer's current execution. */
+export interface IndexerCurrentState {
+  /** The mode the indexer is running in. */
+  readonly mode?: IndexingMode;
+  /** Change tracking state used when indexing starts on all documents in the datasource. */
+  readonly allDocsInitialTrackingState?: string;
+  /** Change tracking state value when indexing finishes on all documents in the datasource. */
+  readonly allDocsFinalTrackingState?: string;
+  /** Change tracking state used when indexing starts on select, reset documents in the datasource. */
+  readonly resetDocsInitialTrackingState?: string;
+  /** Change tracking state value when indexing finishes on select, reset documents in the datasource. */
+  readonly resetDocsFinalTrackingState?: string;
+  /** Change tracking state used when indexing starts on selective options from the datasource. */
+  readonly resyncInitialTrackingState?: string;
+  /** Change tracking state value when indexing finishes on selective options from the datasource. */
+  readonly resyncFinalTrackingState?: string;
+  /** The list of document keys that have been reset. The document key is the document's unique identifier for the data in the search index. The indexer will prioritize selectively re-ingesting these keys. */
+  readonly resetDocumentKeys?: string[];
+  /** The list of datasource document ids that have been reset. The datasource document id is the unique identifier for the data in the datasource. The indexer will prioritize selectively re-ingesting these ids. */
+  readonly resetDatasourceDocumentIds?: string[];
+}
+
+export function indexerCurrentStateDeserializer(item: any): IndexerCurrentState {
+  return {
+    mode: item["mode"],
+    allDocsInitialTrackingState: item["allDocsInitialTrackingState"],
+    allDocsFinalTrackingState: item["allDocsFinalTrackingState"],
+    resetDocsInitialTrackingState: item["resetDocsInitialTrackingState"],
+    resetDocsFinalTrackingState: item["resetDocsFinalTrackingState"],
+    resyncInitialTrackingState: item["resyncInitialTrackingState"],
+    resyncFinalTrackingState: item["resyncFinalTrackingState"],
+    resetDocumentKeys: !item["resetDocumentKeys"]
+      ? item["resetDocumentKeys"]
+      : item["resetDocumentKeys"].map((p: any) => {
+          return p;
+        }),
+    resetDatasourceDocumentIds: !item["resetDatasourceDocumentIds"]
+      ? item["resetDatasourceDocumentIds"]
+      : item["resetDatasourceDocumentIds"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function searchIndexerErrorArrayDeserializer(result: Array<SearchIndexerError>): any[] {
+  return result.map((item) => {
+    return searchIndexerErrorDeserializer(item);
+  });
+}
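+// Editor's sketch: deserializing item-level errors from a raw execution-result payload;
+// the key, message, and status code are invented for illustration.
+const exampleErrors: SearchIndexerError[] = searchIndexerErrorArrayDeserializer([
+  { key: "doc-42", errorMessage: "Could not parse document.", statusCode: 400 },
+]);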
+/** Represents an item- or document-level indexing error. */
+export interface SearchIndexerError {
+  /** The key of the item for which indexing failed. */
+  key?: string;
+  /** The message describing the error that occurred while processing the item. */
+  errorMessage: string;
+  /** The status code indicating why the indexing operation failed. Possible values include: 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 when the service is too busy. */
+  statusCode: number;
+  /** The name of the source at which the error originated. For example, this could refer to a particular skill in the attached skillset. This may not always be available. */
+  name?: string;
+  /** Additional, verbose details about the error to assist in debugging the indexer. This may not always be available. */
+  details?: string;
+  /** A link to a troubleshooting guide for these classes of errors. This may not always be available. */
+  documentationLink?: string;
+}
+
+export function searchIndexerErrorDeserializer(item: any): SearchIndexerError {
+  return {
+    key: item["key"],
+    errorMessage: item["errorMessage"],
+    statusCode: item["statusCode"],
+    name: item["name"],
+    details: item["details"],
+    documentationLink: item["documentationLink"],
+  };
+}
+
+export function searchIndexerWarningArrayDeserializer(
+  result: Array<SearchIndexerWarning>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerWarningDeserializer(item);
+  });
+}
+
+/** Represents an item-level warning. */
+export interface SearchIndexerWarning {
+  /** The key of the item which generated a warning. */
+  key?: string;
+  /** The message describing the warning that occurred while processing the item. */
+  message: string;
+  /** The name of the source at which the warning originated. For example, this could refer to a particular skill in the attached skillset. This may not always be available. */
+  name?: string;
+  /** Additional, verbose details about the warning to assist in debugging the indexer. This may not always be available. */
+  details?: string;
+  /** A link to a troubleshooting guide for these classes of warnings. This may not always be available. */
+  documentationLink?: string;
+}
+
+export function searchIndexerWarningDeserializer(item: any): SearchIndexerWarning {
+  return {
+    key: item["key"],
+    message: item["message"],
+    name: item["name"],
+    details: item["details"],
+    documentationLink: item["documentationLink"],
+  };
+}
+
+export function indexerExecutionResultArrayDeserializer(
+  result: Array<IndexerExecutionResult>,
+): any[] {
+  return result.map((item) => {
+    return indexerExecutionResultDeserializer(item);
+  });
+}
+
+/** Represents the limits that can be applied to an indexer. */
+export interface SearchIndexerLimits {
+  /** The maximum duration that the indexer is permitted to run for one execution. */
+  maxRunTime?: string;
+  /** The maximum size of a document, in bytes, which will be considered valid for indexing. */
+  maxDocumentExtractionSize?: number;
+  /** The maximum number of characters that will be extracted from a document picked up for indexing. */
+  maxDocumentContentCharactersToExtract?: number;
+}
+
+export function searchIndexerLimitsDeserializer(item: any): SearchIndexerLimits {
+  return {
+    maxRunTime: item["maxRunTime"],
+    maxDocumentExtractionSize: item["maxDocumentExtractionSize"],
+    maxDocumentContentCharactersToExtract: item["maxDocumentContentCharactersToExtract"],
+  };
+}
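+// Editor's sketch: a limits payload round-tripped through the deserializer above. The
+// ISO 8601 duration form of maxRunTime is assumed; numeric limits are bytes/characters.
+const exampleLimits: SearchIndexerLimits = searchIndexerLimitsDeserializer({
+  maxRunTime: "PT2H",
+  maxDocumentExtractionSize: 16777216,
+  maxDocumentContentCharactersToExtract: 4194304,
+});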
+/** A list of skills. */
+export interface SearchIndexerSkillset {
+  /** The name of the skillset. */
+  name: string;
+  /** The description of the skillset. */
+  description?: string;
+  /** A list of skills in the skillset. */
+  skills: SearchIndexerSkillUnion[];
+  /** Details about the Azure AI service to be used when running skills. */
+  cognitiveServicesAccount?: CognitiveServicesAccountUnion;
+  /** Definition of additional projections to Azure blob, table, or files, of enriched data. */
+  knowledgeStore?: SearchIndexerKnowledgeStore;
+  /** Definition of additional projections to secondary search index(es). */
+  indexProjection?: SearchIndexerIndexProjection;
+  /** The ETag of the skillset. */
+  eTag?: string;
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can decrypt your skillset definition. Once you have encrypted your skillset definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your skillset definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+}
+
+export function searchIndexerSkillsetSerializer(item: SearchIndexerSkillset): any {
+  return {
+    name: item["name"],
+    description: item["description"],
+    skills: searchIndexerSkillUnionArraySerializer(item["skills"]),
+    cognitiveServices: !item["cognitiveServicesAccount"]
+      ? item["cognitiveServicesAccount"]
+      : cognitiveServicesAccountUnionSerializer(item["cognitiveServicesAccount"]),
+    knowledgeStore: !item["knowledgeStore"]
+      ? item["knowledgeStore"]
+      : searchIndexerKnowledgeStoreSerializer(item["knowledgeStore"]),
+    indexProjections: !item["indexProjection"]
+      ? item["indexProjection"]
+      : searchIndexerIndexProjectionSerializer(item["indexProjection"]),
+    "@odata.etag": item["eTag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeySerializer(item["encryptionKey"]),
+  };
+}
+
+export function searchIndexerSkillsetDeserializer(item: any): SearchIndexerSkillset {
+  return {
+    name: item["name"],
+    description: item["description"],
+    skills: searchIndexerSkillUnionArrayDeserializer(item["skills"]),
+    cognitiveServicesAccount: !item["cognitiveServices"]
+      ? item["cognitiveServices"]
+      : cognitiveServicesAccountUnionDeserializer(item["cognitiveServices"]),
+    knowledgeStore: !item["knowledgeStore"]
+      ? item["knowledgeStore"]
+      : searchIndexerKnowledgeStoreDeserializer(item["knowledgeStore"]),
+    indexProjection: !item["indexProjections"]
+      ? item["indexProjections"]
+      : searchIndexerIndexProjectionDeserializer(item["indexProjections"]),
+    eTag: item["@odata.etag"],
+    encryptionKey: !item["encryptionKey"]
+      ? item["encryptionKey"]
+      : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]),
+  };
+}
+
+export function searchIndexerSkillUnionArraySerializer(
+  result: Array<SearchIndexerSkillUnion>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerSkillUnionSerializer(item);
+  });
+}
+
+export function searchIndexerSkillUnionArrayDeserializer(
+  result: Array<SearchIndexerSkillUnion>,
+): any[] {
+  return result.map((item) => {
+    return searchIndexerSkillUnionDeserializer(item);
+  });
+}
+
+/** Base type for skills. */
+export interface SearchIndexerSkill {
+  /** The discriminator for derived types.
*/ + /** The discriminator possible values: #Microsoft.Skills.Util.ConditionalSkill, #Microsoft.Skills.Text.KeyPhraseExtractionSkill, #Microsoft.Skills.Vision.OcrSkill, #Microsoft.Skills.Vision.ImageAnalysisSkill, #Microsoft.Skills.Text.LanguageDetectionSkill, #Microsoft.Skills.Util.ShaperSkill, #Microsoft.Skills.Text.MergeSkill, #Microsoft.Skills.Text.EntityRecognitionSkill, #Microsoft.Skills.Text.SentimentSkill, #Microsoft.Skills.Text.V3.SentimentSkill, #Microsoft.Skills.Text.V3.EntityLinkingSkill, #Microsoft.Skills.Text.V3.EntityRecognitionSkill, #Microsoft.Skills.Text.PIIDetectionSkill, #Microsoft.Skills.Text.SplitSkill, #Microsoft.Skills.Text.CustomEntityLookupSkill, #Microsoft.Skills.Text.TranslationSkill, #Microsoft.Skills.Util.DocumentExtractionSkill, #Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill, #Microsoft.Skills.Custom.WebApiSkill, #Microsoft.Skills.Custom.AmlSkill, #Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill, #Microsoft.Skills.Vision.VectorizeSkill, #Microsoft.Skills.Util.ContentUnderstandingSkill, #Microsoft.Skills.Custom.ChatCompletionSkill */ + odatatype: string; + /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */ + name?: string; + /** The description of the skill which describes the inputs, outputs, and usage of the skill. */ + description?: string; + /** Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. */ + context?: string; + /** Inputs of the skills could be a column in the source data set, or the output of an upstream skill. */ + inputs: InputFieldMappingEntry[]; + /** The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. 
*/ + outputs: OutputFieldMappingEntry[]; +} + +export function searchIndexerSkillSerializer(item: SearchIndexerSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + }; +} + +export function searchIndexerSkillDeserializer(item: any): SearchIndexerSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + }; +} + +/** Alias for SearchIndexerSkillUnion */ +export type SearchIndexerSkillUnion = + | ConditionalSkill + | KeyPhraseExtractionSkill + | OcrSkill + | ImageAnalysisSkill + | LanguageDetectionSkill + | ShaperSkill + | MergeSkill + | EntityRecognitionSkill + | SentimentSkill + | SentimentSkillV3 + | EntityLinkingSkill + | EntityRecognitionSkillV3 + | PIIDetectionSkill + | SplitSkill + | CustomEntityLookupSkill + | TextTranslationSkill + | DocumentExtractionSkill + | DocumentIntelligenceLayoutSkill + | WebApiSkill + | AzureMachineLearningSkill + | AzureOpenAIEmbeddingSkill + | VisionVectorizeSkill + | ContentUnderstandingSkill + | ChatCompletionSkill + | SearchIndexerSkill; + +export function searchIndexerSkillUnionSerializer(item: SearchIndexerSkillUnion): any { + switch (item.odatatype) { + case "#Microsoft.Skills.Util.ConditionalSkill": + return conditionalSkillSerializer(item as ConditionalSkill); + + case "#Microsoft.Skills.Text.KeyPhraseExtractionSkill": + return keyPhraseExtractionSkillSerializer(item as KeyPhraseExtractionSkill); + + case "#Microsoft.Skills.Vision.OcrSkill": + return ocrSkillSerializer(item as OcrSkill); + + case "#Microsoft.Skills.Vision.ImageAnalysisSkill": + return imageAnalysisSkillSerializer(item as ImageAnalysisSkill); + + case "#Microsoft.Skills.Text.LanguageDetectionSkill": + return languageDetectionSkillSerializer(item as LanguageDetectionSkill); + + case "#Microsoft.Skills.Util.ShaperSkill": + return shaperSkillSerializer(item as ShaperSkill); + + case "#Microsoft.Skills.Text.MergeSkill": + return mergeSkillSerializer(item as MergeSkill); + + case "#Microsoft.Skills.Text.EntityRecognitionSkill": + return entityRecognitionSkillSerializer(item as EntityRecognitionSkill); + + case "#Microsoft.Skills.Text.SentimentSkill": + return sentimentSkillSerializer(item as SentimentSkill); + + case "#Microsoft.Skills.Text.V3.SentimentSkill": + return sentimentSkillV3Serializer(item as SentimentSkillV3); + + case "#Microsoft.Skills.Text.V3.EntityLinkingSkill": + return entityLinkingSkillSerializer(item as EntityLinkingSkill); + + case "#Microsoft.Skills.Text.V3.EntityRecognitionSkill": + return entityRecognitionSkillV3Serializer(item as EntityRecognitionSkillV3); + + case "#Microsoft.Skills.Text.PIIDetectionSkill": + return piiDetectionSkillSerializer(item as PIIDetectionSkill); + + case "#Microsoft.Skills.Text.SplitSkill": + return splitSkillSerializer(item as SplitSkill); + + case "#Microsoft.Skills.Text.CustomEntityLookupSkill": + return customEntityLookupSkillSerializer(item as CustomEntityLookupSkill); + + case "#Microsoft.Skills.Text.TranslationSkill": + return textTranslationSkillSerializer(item as TextTranslationSkill); + + case "#Microsoft.Skills.Util.DocumentExtractionSkill": + return 
documentExtractionSkillSerializer(item as DocumentExtractionSkill);
+
+    case "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill":
+      return documentIntelligenceLayoutSkillSerializer(item as DocumentIntelligenceLayoutSkill);
+
+    case "#Microsoft.Skills.Custom.WebApiSkill":
+      return webApiSkillSerializer(item as WebApiSkill);
+
+    case "#Microsoft.Skills.Custom.AmlSkill":
+      return azureMachineLearningSkillSerializer(item as AzureMachineLearningSkill);
+
+    case "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill":
+      return azureOpenAIEmbeddingSkillSerializer(item as AzureOpenAIEmbeddingSkill);
+
+    case "#Microsoft.Skills.Vision.VectorizeSkill":
+      return visionVectorizeSkillSerializer(item as VisionVectorizeSkill);
+
+    case "#Microsoft.Skills.Util.ContentUnderstandingSkill":
+      return contentUnderstandingSkillSerializer(item as ContentUnderstandingSkill);
+
+    case "#Microsoft.Skills.Custom.ChatCompletionSkill":
+      return chatCompletionSkillSerializer(item as ChatCompletionSkill);
+
+    default:
+      return searchIndexerSkillSerializer(item);
+  }
+}
+
+export function searchIndexerSkillUnionDeserializer(item: any): SearchIndexerSkillUnion {
+  // Raw payloads carry the discriminator under its wire name, "@odata.type".
+  switch (item["@odata.type"]) {
+    case "#Microsoft.Skills.Util.ConditionalSkill":
+      return conditionalSkillDeserializer(item as ConditionalSkill);
+
+    case "#Microsoft.Skills.Text.KeyPhraseExtractionSkill":
+      return keyPhraseExtractionSkillDeserializer(item as KeyPhraseExtractionSkill);
+
+    case "#Microsoft.Skills.Vision.OcrSkill":
+      return ocrSkillDeserializer(item as OcrSkill);
+
+    case "#Microsoft.Skills.Vision.ImageAnalysisSkill":
+      return imageAnalysisSkillDeserializer(item as ImageAnalysisSkill);
+
+    case "#Microsoft.Skills.Text.LanguageDetectionSkill":
+      return languageDetectionSkillDeserializer(item as LanguageDetectionSkill);
+
+    case "#Microsoft.Skills.Util.ShaperSkill":
+      return shaperSkillDeserializer(item as ShaperSkill);
+
+    case "#Microsoft.Skills.Text.MergeSkill":
+      return mergeSkillDeserializer(item as MergeSkill);
+
+    case "#Microsoft.Skills.Text.EntityRecognitionSkill":
+      return entityRecognitionSkillDeserializer(item as EntityRecognitionSkill);
+
+    case "#Microsoft.Skills.Text.SentimentSkill":
+      return sentimentSkillDeserializer(item as SentimentSkill);
+
+    case "#Microsoft.Skills.Text.V3.SentimentSkill":
+      return sentimentSkillV3Deserializer(item as SentimentSkillV3);
+
+    case "#Microsoft.Skills.Text.V3.EntityLinkingSkill":
+      return entityLinkingSkillDeserializer(item as EntityLinkingSkill);
+
+    case "#Microsoft.Skills.Text.V3.EntityRecognitionSkill":
+      return entityRecognitionSkillV3Deserializer(item as EntityRecognitionSkillV3);
+
+    case "#Microsoft.Skills.Text.PIIDetectionSkill":
+      return piiDetectionSkillDeserializer(item as PIIDetectionSkill);
+
+    case "#Microsoft.Skills.Text.SplitSkill":
+      return splitSkillDeserializer(item as SplitSkill);
+
+    case "#Microsoft.Skills.Text.CustomEntityLookupSkill":
+      return customEntityLookupSkillDeserializer(item as CustomEntityLookupSkill);
+
+    case "#Microsoft.Skills.Text.TranslationSkill":
+      return textTranslationSkillDeserializer(item as TextTranslationSkill);
+
+    case "#Microsoft.Skills.Util.DocumentExtractionSkill":
+      return documentExtractionSkillDeserializer(item as DocumentExtractionSkill);
+
+    case "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill":
+      return documentIntelligenceLayoutSkillDeserializer(item as DocumentIntelligenceLayoutSkill);
+
+    case "#Microsoft.Skills.Custom.WebApiSkill":
+      return webApiSkillDeserializer(item as WebApiSkill);
+
+    case "#Microsoft.Skills.Custom.AmlSkill":
+      return azureMachineLearningSkillDeserializer(item as AzureMachineLearningSkill);
+
+    case "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill":
+      return azureOpenAIEmbeddingSkillDeserializer(item as AzureOpenAIEmbeddingSkill);
+
+    case "#Microsoft.Skills.Vision.VectorizeSkill":
+      return visionVectorizeSkillDeserializer(item as VisionVectorizeSkill);
+
+    case "#Microsoft.Skills.Util.ContentUnderstandingSkill":
+      return contentUnderstandingSkillDeserializer(item as ContentUnderstandingSkill);
+
+    case "#Microsoft.Skills.Custom.ChatCompletionSkill":
+      return chatCompletionSkillDeserializer(item as ChatCompletionSkill);
+
+    default:
+      return searchIndexerSkillDeserializer(item);
+  }
+}
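+// Editor's sketch: an unrecognized "@odata.type" falls through to the base-skill
+// deserializer above, preserving only the common fields. The skill type and payload
+// here are hypothetical, chosen to demonstrate the default branch.
+const exampleUnknownSkill: SearchIndexerSkillUnion = searchIndexerSkillUnionDeserializer({
+  "@odata.type": "#Microsoft.Skills.Sample.FutureSkill",
+  inputs: [{ name: "text", source: "/document/content" }],
+  outputs: [{ name: "result", targetName: "futureResult" }],
+});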
return azureMachineLearningSkillDeserializer(item as AzureMachineLearningSkill);
+
+    case "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill":
+      return azureOpenAIEmbeddingSkillDeserializer(item as AzureOpenAIEmbeddingSkill);
+
+    case "#Microsoft.Skills.Vision.VectorizeSkill":
+      return visionVectorizeSkillDeserializer(item as VisionVectorizeSkill);
+
+    case "#Microsoft.Skills.Util.ContentUnderstandingSkill":
+      return contentUnderstandingSkillDeserializer(item as ContentUnderstandingSkill);
+
+    case "#Microsoft.Skills.Custom.ChatCompletionSkill":
+      return chatCompletionSkillDeserializer(item as ChatCompletionSkill);
+
+    default:
+      return searchIndexerSkillDeserializer(item);
+  }
+}
+
+export function inputFieldMappingEntryArraySerializer(
+  result: Array<InputFieldMappingEntry>,
+): any[] {
+  return result.map((item) => {
+    return inputFieldMappingEntrySerializer(item);
+  });
+}
+
+export function inputFieldMappingEntryArrayDeserializer(
+  result: Array<InputFieldMappingEntry>,
+): any[] {
+  return result.map((item) => {
+    return inputFieldMappingEntryDeserializer(item);
+  });
+}
+
+/** Input field mapping for a skill. */
+export interface InputFieldMappingEntry {
+  /** The name of the input. */
+  name: string;
+  /** The source of the input. */
+  source?: string;
+  /** The source context used for selecting recursive inputs. */
+  sourceContext?: string;
+  /** The recursive inputs used when creating a complex type. */
+  inputs?: InputFieldMappingEntry[];
+}
+
+export function inputFieldMappingEntrySerializer(item: InputFieldMappingEntry): any {
+  return {
+    name: item["name"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArraySerializer(item["inputs"]),
+  };
+}
+
+export function inputFieldMappingEntryDeserializer(item: any): InputFieldMappingEntry {
+  return {
+    name: item["name"],
+    source: item["source"],
+    sourceContext: item["sourceContext"],
+    inputs: !item["inputs"]
+      ? item["inputs"]
+      : inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+  };
+}
+
+export function outputFieldMappingEntryArraySerializer(
+  result: Array<OutputFieldMappingEntry>,
+): any[] {
+  return result.map((item) => {
+    return outputFieldMappingEntrySerializer(item);
+  });
+}
+
+export function outputFieldMappingEntryArrayDeserializer(
+  result: Array<OutputFieldMappingEntry>,
+): any[] {
+  return result.map((item) => {
+    return outputFieldMappingEntryDeserializer(item);
+  });
+}
+
+/** Output field mapping for a skill. */
+export interface OutputFieldMappingEntry {
+  /** The name of the output defined by the skill. */
+  name: string;
+  /** The target name of the output. It is optional and defaults to the name. */
+  targetName?: string;
+}
+
+export function outputFieldMappingEntrySerializer(item: OutputFieldMappingEntry): any {
+  return { name: item["name"], targetName: item["targetName"] };
+}
+
+export function outputFieldMappingEntryDeserializer(item: any): OutputFieldMappingEntry {
+  return {
+    name: item["name"],
+    targetName: item["targetName"],
+  };
+}
+
+/** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */
+export interface ConditionalSkill extends SearchIndexerSkill {
+  /** A URI fragment specifying the type of skill.
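+   *
+   * @example
+   * A hedged, illustrative sketch (paths and names invented here, not part of
+   * the generated source) of how InputFieldMappingEntry and
+   * OutputFieldMappingEntry wire a ConditionalSkill together:
+   * ```ts
+   * const skill: ConditionalSkill = {
+   *   odatatype: "#Microsoft.Skills.Util.ConditionalSkill",
+   *   context: "/document",
+   *   inputs: [
+   *     { name: "condition", source: "= $(/document/language) == 'fr'" },
+   *     { name: "whenTrue", source: "/document/frenchText" },
+   *     { name: "whenFalse", source: "/document/translatedText" },
+   *   ],
+   *   outputs: [{ name: "output", targetName: "text" }],
+   * };
+   * ```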
*/ + odatatype: "#Microsoft.Skills.Util.ConditionalSkill"; +} + +export function conditionalSkillSerializer(item: ConditionalSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + }; +} + +export function conditionalSkillDeserializer(item: any): ConditionalSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + }; +} + +/** A skill that uses text analytics for key phrase extraction. */ +export interface KeyPhraseExtractionSkill extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: KeyPhraseExtractionSkillLanguage; + /** A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. */ + maxKeyPhraseCount?: number; + /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */ + modelVersion?: string; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill"; +} + +export function keyPhraseExtractionSkillSerializer(item: KeyPhraseExtractionSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + maxKeyPhraseCount: item["maxKeyPhraseCount"], + modelVersion: item["modelVersion"], + }; +} + +export function keyPhraseExtractionSkillDeserializer(item: any): KeyPhraseExtractionSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + maxKeyPhraseCount: item["maxKeyPhraseCount"], + modelVersion: item["modelVersion"], + }; +} + +/** The language codes supported for input text by KeyPhraseExtractionSkill. */ +export enum KnownKeyPhraseExtractionSkillLanguage { + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Korean */ + Ko = "ko", + /** Norwegian (Bokmaal) */ + No = "no", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Portuguese (Brazil) */ + PtBR = "pt-BR", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", +} + +/** + * The language codes supported for input text by KeyPhraseExtractionSkill. \ + * {@link KnownKeyPhraseExtractionSkillLanguage} can be used interchangeably with KeyPhraseExtractionSkillLanguage, + * this enum contains the known values that the service supports. 
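+ *
+ * @example
+ * Illustrative only: because the alias is plain `string`, both a known
+ * constant and a value the enum does not yet model type-check.
+ * ```ts
+ * const known: KeyPhraseExtractionSkillLanguage = KnownKeyPhraseExtractionSkillLanguage.Fr;
+ * const future: KeyPhraseExtractionSkillLanguage = "xx"; // hypothetical service value
+ * ```
+ *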
+ * ### Known values supported by the service + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **it**: Italian \ + * **ja**: Japanese \ + * **ko**: Korean \ + * **no**: Norwegian (Bokmaal) \ + * **pl**: Polish \ + * **pt-PT**: Portuguese (Portugal) \ + * **pt-BR**: Portuguese (Brazil) \ + * **ru**: Russian \ + * **es**: Spanish \ + * **sv**: Swedish + */ +export type KeyPhraseExtractionSkillLanguage = string; + +/** A skill that extracts text from image files. */ +export interface OcrSkill extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: OcrSkillLanguage; + /** A value indicating to turn orientation detection on or not. Default is false. */ + shouldDetectOrientation?: boolean; + /** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". */ + lineEnding?: OcrLineEnding; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Vision.OcrSkill"; +} + +export function ocrSkillSerializer(item: OcrSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + detectOrientation: item["shouldDetectOrientation"], + lineEnding: item["lineEnding"], + }; +} + +export function ocrSkillDeserializer(item: any): OcrSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + shouldDetectOrientation: item["detectOrientation"], + lineEnding: item["lineEnding"], + }; +} + +/** The language codes supported for input by OcrSkill. 
*/ +export enum KnownOcrSkillLanguage { + /** Afrikaans */ + Af = "af", + /** Albanian */ + Sq = "sq", + /** Angika (Devanagiri) */ + Anp = "anp", + /** Arabic */ + Ar = "ar", + /** Asturian */ + Ast = "ast", + /** Awadhi-Hindi (Devanagiri) */ + Awa = "awa", + /** Azerbaijani (Latin) */ + Az = "az", + /** Bagheli */ + Bfy = "bfy", + /** Basque */ + Eu = "eu", + /** Belarusian (Cyrillic and Latin) */ + Be = "be", + /** Belarusian (Cyrillic) */ + BeCyrl = "be-cyrl", + /** Belarusian (Latin) */ + BeLatn = "be-latn", + /** Bhojpuri-Hindi (Devanagiri) */ + Bho = "bho", + /** Bislama */ + Bi = "bi", + /** Bodo (Devanagiri) */ + Brx = "brx", + /** Bosnian Latin */ + Bs = "bs", + /** Brajbha */ + Bra = "bra", + /** Breton */ + Br = "br", + /** Bulgarian */ + Bg = "bg", + /** Bundeli */ + Bns = "bns", + /** Buryat (Cyrillic) */ + Bua = "bua", + /** Catalan */ + Ca = "ca", + /** Cebuano */ + Ceb = "ceb", + /** Chamling */ + Rab = "rab", + /** Chamorro */ + Ch = "ch", + /** Chhattisgarhi (Devanagiri) */ + Hne = "hne", + /** Chinese Simplified */ + ZhHans = "zh-Hans", + /** Chinese Traditional */ + ZhHant = "zh-Hant", + /** Cornish */ + Kw = "kw", + /** Corsican */ + Co = "co", + /** Crimean Tatar (Latin) */ + Crh = "crh", + /** Croatian */ + Hr = "hr", + /** Czech */ + Cs = "cs", + /** Danish */ + Da = "da", + /** Dari */ + Prs = "prs", + /** Dhimal (Devanagiri) */ + Dhi = "dhi", + /** Dogri (Devanagiri) */ + Doi = "doi", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Erzya (Cyrillic) */ + Myv = "myv", + /** Estonian */ + Et = "et", + /** Faroese */ + Fo = "fo", + /** Fijian */ + Fj = "fj", + /** Filipino */ + Fil = "fil", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** Frulian */ + Fur = "fur", + /** Gagauz (Latin) */ + Gag = "gag", + /** Galician */ + Gl = "gl", + /** German */ + De = "de", + /** Gilbertese */ + Gil = "gil", + /** Gondi (Devanagiri) */ + Gon = "gon", + /** Greek */ + El = "el", + /** Greenlandic */ + Kl = "kl", + /** Gurung (Devanagiri) */ + Gvr = "gvr", + /** Haitian Creole */ + Ht = "ht", + /** Halbi (Devanagiri) */ + Hlb = "hlb", + /** Hani */ + Hni = "hni", + /** Haryanvi */ + Bgc = "bgc", + /** Hawaiian */ + Haw = "haw", + /** Hindi */ + Hi = "hi", + /** Hmong Daw (Latin) */ + Mww = "mww", + /** Ho (Devanagiri) */ + Hoc = "hoc", + /** Hungarian */ + Hu = "hu", + /** Icelandic */ + Is = "is", + /** Inari Sami */ + Smn = "smn", + /** Indonesian */ + Id = "id", + /** Interlingua */ + Ia = "ia", + /** Inuktitut (Latin) */ + Iu = "iu", + /** Irish */ + Ga = "ga", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Jaunsari (Devanagiri) */ + Jns = "Jns", + /** Javanese */ + Jv = "jv", + /** Kabuverdianu */ + Kea = "kea", + /** Kachin (Latin) */ + Kac = "kac", + /** Kangri (Devanagiri) */ + Xnr = "xnr", + /** Karachay-Balkar */ + Krc = "krc", + /** Kara-Kalpak (Cyrillic) */ + KaaCyrl = "kaa-cyrl", + /** Kara-Kalpak (Latin) */ + Kaa = "kaa", + /** Kashubian */ + Csb = "csb", + /** Kazakh (Cyrillic) */ + KkCyrl = "kk-cyrl", + /** Kazakh (Latin) */ + KkLatn = "kk-latn", + /** Khaling */ + Klr = "klr", + /** Khasi */ + Kha = "kha", + /** K'iche' */ + Quc = "quc", + /** Korean */ + Ko = "ko", + /** Korku */ + Kfq = "kfq", + /** Koryak */ + Kpy = "kpy", + /** Kosraean */ + Kos = "kos", + /** Kumyk (Cyrillic) */ + Kum = "kum", + /** Kurdish (Arabic) */ + KuArab = "ku-arab", + /** Kurdish (Latin) */ + KuLatn = "ku-latn", + /** Kurukh (Devanagiri) */ + Kru = "kru", + /** Kyrgyz (Cyrillic) */ + Ky = "ky", + /** Lakota */ + Lkt = "lkt", + /** 
Latin */ + La = "la", + /** Lithuanian */ + Lt = "lt", + /** Lower Sorbian */ + Dsb = "dsb", + /** Lule Sami */ + Smj = "smj", + /** Luxembourgish */ + Lb = "lb", + /** Mahasu Pahari (Devanagiri) */ + Bfz = "bfz", + /** Malay (Latin) */ + Ms = "ms", + /** Maltese */ + Mt = "mt", + /** Malto (Devanagiri) */ + Kmj = "kmj", + /** Manx */ + Gv = "gv", + /** Maori */ + Mi = "mi", + /** Marathi */ + Mr = "mr", + /** Mongolian (Cyrillic) */ + Mn = "mn", + /** Montenegrin (Cyrillic) */ + CnrCyrl = "cnr-cyrl", + /** Montenegrin (Latin) */ + CnrLatn = "cnr-latn", + /** Neapolitan */ + Nap = "nap", + /** Nepali */ + Ne = "ne", + /** Niuean */ + Niu = "niu", + /** Nogay */ + Nog = "nog", + /** Northern Sami (Latin) */ + Sme = "sme", + /** Norwegian */ + Nb = "nb", + /** Norwegian */ + No = "no", + /** Occitan */ + Oc = "oc", + /** Ossetic */ + Os = "os", + /** Pashto */ + Ps = "ps", + /** Persian */ + Fa = "fa", + /** Polish */ + Pl = "pl", + /** Portuguese */ + Pt = "pt", + /** Punjabi (Arabic) */ + Pa = "pa", + /** Ripuarian */ + Ksh = "ksh", + /** Romanian */ + Ro = "ro", + /** Romansh */ + Rm = "rm", + /** Russian */ + Ru = "ru", + /** Sadri (Devanagiri) */ + Sck = "sck", + /** Samoan (Latin) */ + Sm = "sm", + /** Sanskrit (Devanagiri) */ + Sa = "sa", + /** Santali (Devanagiri) */ + Sat = "sat", + /** Scots */ + Sco = "sco", + /** Scottish Gaelic */ + Gd = "gd", + /** Serbian (Latin) */ + Sr = "sr", + /** Serbian (Cyrillic) */ + SrCyrl = "sr-Cyrl", + /** Serbian (Latin) */ + SrLatn = "sr-Latn", + /** Sherpa (Devanagiri) */ + Xsr = "xsr", + /** Sirmauri (Devanagiri) */ + Srx = "srx", + /** Skolt Sami */ + Sms = "sms", + /** Slovak */ + Sk = "sk", + /** Slovenian */ + Sl = "sl", + /** Somali (Arabic) */ + So = "so", + /** Southern Sami */ + Sma = "sma", + /** Spanish */ + Es = "es", + /** Swahili (Latin) */ + Sw = "sw", + /** Swedish */ + Sv = "sv", + /** Tajik (Cyrillic) */ + Tg = "tg", + /** Tatar (Latin) */ + Tt = "tt", + /** Tetum */ + Tet = "tet", + /** Thangmi */ + Thf = "thf", + /** Tongan */ + To = "to", + /** Turkish */ + Tr = "tr", + /** Turkmen (Latin) */ + Tk = "tk", + /** Tuvan */ + Tyv = "tyv", + /** Upper Sorbian */ + Hsb = "hsb", + /** Urdu */ + Ur = "ur", + /** Uyghur (Arabic) */ + Ug = "ug", + /** Uzbek (Arabic) */ + UzArab = "uz-arab", + /** Uzbek (Cyrillic) */ + UzCyrl = "uz-cyrl", + /** Uzbek (Latin) */ + Uz = "uz", + /** Volapük */ + Vo = "vo", + /** Walser */ + Wae = "wae", + /** Welsh */ + Cy = "cy", + /** Western Frisian */ + Fy = "fy", + /** Yucatec Maya */ + Yua = "yua", + /** Zhuang */ + Za = "za", + /** Zulu */ + Zu = "zu", + /** Unknown (All) */ + Unk = "unk", +} + +/** + * The language codes supported for input by OcrSkill. \ + * {@link KnownOcrSkillLanguage} can be used interchangeably with OcrSkillLanguage, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service + * **af**: Afrikaans \ + * **sq**: Albanian \ + * **anp**: Angika (Devanagiri) \ + * **ar**: Arabic \ + * **ast**: Asturian \ + * **awa**: Awadhi-Hindi (Devanagiri) \ + * **az**: Azerbaijani (Latin) \ + * **bfy**: Bagheli \ + * **eu**: Basque \ + * **be**: Belarusian (Cyrillic and Latin) \ + * **be-cyrl**: Belarusian (Cyrillic) \ + * **be-latn**: Belarusian (Latin) \ + * **bho**: Bhojpuri-Hindi (Devanagiri) \ + * **bi**: Bislama \ + * **brx**: Bodo (Devanagiri) \ + * **bs**: Bosnian Latin \ + * **bra**: Brajbha \ + * **br**: Breton \ + * **bg**: Bulgarian \ + * **bns**: Bundeli \ + * **bua**: Buryat (Cyrillic) \ + * **ca**: Catalan \ + * **ceb**: Cebuano \ + * **rab**: Chamling \ + * **ch**: Chamorro \ + * **hne**: Chhattisgarhi (Devanagiri) \ + * **zh-Hans**: Chinese Simplified \ + * **zh-Hant**: Chinese Traditional \ + * **kw**: Cornish \ + * **co**: Corsican \ + * **crh**: Crimean Tatar (Latin) \ + * **hr**: Croatian \ + * **cs**: Czech \ + * **da**: Danish \ + * **prs**: Dari \ + * **dhi**: Dhimal (Devanagiri) \ + * **doi**: Dogri (Devanagiri) \ + * **nl**: Dutch \ + * **en**: English \ + * **myv**: Erzya (Cyrillic) \ + * **et**: Estonian \ + * **fo**: Faroese \ + * **fj**: Fijian \ + * **fil**: Filipino \ + * **fi**: Finnish \ + * **fr**: French \ + * **fur**: Frulian \ + * **gag**: Gagauz (Latin) \ + * **gl**: Galician \ + * **de**: German \ + * **gil**: Gilbertese \ + * **gon**: Gondi (Devanagiri) \ + * **el**: Greek \ + * **kl**: Greenlandic \ + * **gvr**: Gurung (Devanagiri) \ + * **ht**: Haitian Creole \ + * **hlb**: Halbi (Devanagiri) \ + * **hni**: Hani \ + * **bgc**: Haryanvi \ + * **haw**: Hawaiian \ + * **hi**: Hindi \ + * **mww**: Hmong Daw (Latin) \ + * **hoc**: Ho (Devanagiri) \ + * **hu**: Hungarian \ + * **is**: Icelandic \ + * **smn**: Inari Sami \ + * **id**: Indonesian \ + * **ia**: Interlingua \ + * **iu**: Inuktitut (Latin) \ + * **ga**: Irish \ + * **it**: Italian \ + * **ja**: Japanese \ + * **Jns**: Jaunsari (Devanagiri) \ + * **jv**: Javanese \ + * **kea**: Kabuverdianu \ + * **kac**: Kachin (Latin) \ + * **xnr**: Kangri (Devanagiri) \ + * **krc**: Karachay-Balkar \ + * **kaa-cyrl**: Kara-Kalpak (Cyrillic) \ + * **kaa**: Kara-Kalpak (Latin) \ + * **csb**: Kashubian \ + * **kk-cyrl**: Kazakh (Cyrillic) \ + * **kk-latn**: Kazakh (Latin) \ + * **klr**: Khaling \ + * **kha**: Khasi \ + * **quc**: K'iche' \ + * **ko**: Korean \ + * **kfq**: Korku \ + * **kpy**: Koryak \ + * **kos**: Kosraean \ + * **kum**: Kumyk (Cyrillic) \ + * **ku-arab**: Kurdish (Arabic) \ + * **ku-latn**: Kurdish (Latin) \ + * **kru**: Kurukh (Devanagiri) \ + * **ky**: Kyrgyz (Cyrillic) \ + * **lkt**: Lakota \ + * **la**: Latin \ + * **lt**: Lithuanian \ + * **dsb**: Lower Sorbian \ + * **smj**: Lule Sami \ + * **lb**: Luxembourgish \ + * **bfz**: Mahasu Pahari (Devanagiri) \ + * **ms**: Malay (Latin) \ + * **mt**: Maltese \ + * **kmj**: Malto (Devanagiri) \ + * **gv**: Manx \ + * **mi**: Maori \ + * **mr**: Marathi \ + * **mn**: Mongolian (Cyrillic) \ + * **cnr-cyrl**: Montenegrin (Cyrillic) \ + * **cnr-latn**: Montenegrin (Latin) \ + * **nap**: Neapolitan \ + * **ne**: Nepali \ + * **niu**: Niuean \ + * **nog**: Nogay \ + * **sme**: Northern Sami (Latin) \ + * **nb**: Norwegian \ + * **no**: Norwegian \ + * **oc**: Occitan \ + * **os**: Ossetic \ + * **ps**: Pashto \ + * **fa**: Persian \ + * **pl**: Polish \ + * **pt**: Portuguese \ + * **pa**: Punjabi (Arabic) \ + * **ksh**: Ripuarian \ + * **ro**: Romanian \ + * **rm**: Romansh \ + * **ru**: 
Russian \
+ * **sck**: Sadri (Devanagiri) \
+ * **sm**: Samoan (Latin) \
+ * **sa**: Sanskrit (Devanagiri) \
+ * **sat**: Santali (Devanagiri) \
+ * **sco**: Scots \
+ * **gd**: Scottish Gaelic \
+ * **sr**: Serbian (Latin) \
+ * **sr-Cyrl**: Serbian (Cyrillic) \
+ * **sr-Latn**: Serbian (Latin) \
+ * **xsr**: Sherpa (Devanagiri) \
+ * **srx**: Sirmauri (Devanagiri) \
+ * **sms**: Skolt Sami \
+ * **sk**: Slovak \
+ * **sl**: Slovenian \
+ * **so**: Somali (Arabic) \
+ * **sma**: Southern Sami \
+ * **es**: Spanish \
+ * **sw**: Swahili (Latin) \
+ * **sv**: Swedish \
+ * **tg**: Tajik (Cyrillic) \
+ * **tt**: Tatar (Latin) \
+ * **tet**: Tetum \
+ * **thf**: Thangmi \
+ * **to**: Tongan \
+ * **tr**: Turkish \
+ * **tk**: Turkmen (Latin) \
+ * **tyv**: Tuvan \
+ * **hsb**: Upper Sorbian \
+ * **ur**: Urdu \
+ * **ug**: Uyghur (Arabic) \
+ * **uz-arab**: Uzbek (Arabic) \
+ * **uz-cyrl**: Uzbek (Cyrillic) \
+ * **uz**: Uzbek (Latin) \
+ * **vo**: Volapük \
+ * **wae**: Walser \
+ * **cy**: Welsh \
+ * **fy**: Western Frisian \
+ * **yua**: Yucatec Maya \
+ * **za**: Zhuang \
+ * **zu**: Zulu \
+ * **unk**: Unknown (All)
+ */
+export type OcrSkillLanguage = string;
+
+/** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". */
+export enum KnownOcrLineEnding {
+  /** Lines are separated by a single space character. */
+  Space = "space",
+  /** Lines are separated by a carriage return ('\r') character. */
+  CarriageReturn = "carriageReturn",
+  /** Lines are separated by a single line feed ('\n') character. */
+  LineFeed = "lineFeed",
+  /** Lines are separated by a carriage return and a line feed ('\r\n') character. */
+  CarriageReturnLineFeed = "carriageReturnLineFeed",
+}
+
+/**
+ * Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". \
+ * {@link KnownOcrLineEnding} can be used interchangeably with OcrLineEnding,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **space**: Lines are separated by a single space character. \
+ * **carriageReturn**: Lines are separated by a carriage return ('\r') character. \
+ * **lineFeed**: Lines are separated by a single line feed ('\n') character. \
+ * **carriageReturnLineFeed**: Lines are separated by a carriage return and a line feed ('\r\n') character.
+ */
+export type OcrLineEnding = string;
+
+/** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */
+export interface ImageAnalysisSkill extends SearchIndexerSkill {
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: ImageAnalysisSkillLanguage;
+  /** A list of visual features. */
+  visualFeatures?: VisualFeature[];
+  /** A string indicating which domain-specific details to return. */
+  details?: ImageDetail[];
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill";
+}
+
+export function imageAnalysisSkillSerializer(item: ImageAnalysisSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    visualFeatures: !item["visualFeatures"]
+      ?
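+      // The `!value ? value : value.map(...)` pattern used here for optional
+      // arrays returns the original null/undefined unchanged, so an unset
+      // field stays unset on the wire; only a present array is copied
+      // element by element.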
item["visualFeatures"] + : item["visualFeatures"].map((p: any) => { + return p; + }), + details: !item["details"] + ? item["details"] + : item["details"].map((p: any) => { + return p; + }), + }; +} + +export function imageAnalysisSkillDeserializer(item: any): ImageAnalysisSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + visualFeatures: !item["visualFeatures"] + ? item["visualFeatures"] + : item["visualFeatures"].map((p: any) => { + return p; + }), + details: !item["details"] + ? item["details"] + : item["details"].map((p: any) => { + return p; + }), + }; +} + +/** The language codes supported for input by ImageAnalysisSkill. */ +export enum KnownImageAnalysisSkillLanguage { + /** Arabic */ + Ar = "ar", + /** Azerbaijani */ + Az = "az", + /** Bulgarian */ + Bg = "bg", + /** Bosnian Latin */ + Bs = "bs", + /** Catalan */ + Ca = "ca", + /** Czech */ + Cs = "cs", + /** Welsh */ + Cy = "cy", + /** Danish */ + Da = "da", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** English */ + En = "en", + /** Spanish */ + Es = "es", + /** Estonian */ + Et = "et", + /** Basque */ + Eu = "eu", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** Irish */ + Ga = "ga", + /** Galician */ + Gl = "gl", + /** Hebrew */ + He = "he", + /** Hindi */ + Hi = "hi", + /** Croatian */ + Hr = "hr", + /** Hungarian */ + Hu = "hu", + /** Indonesian */ + Id = "id", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Kazakh */ + Kk = "kk", + /** Korean */ + Ko = "ko", + /** Lithuanian */ + Lt = "lt", + /** Latvian */ + Lv = "lv", + /** Macedonian */ + Mk = "mk", + /** Malay Malaysia */ + Ms = "ms", + /** Norwegian (Bokmal) */ + Nb = "nb", + /** Dutch */ + Nl = "nl", + /** Polish */ + Pl = "pl", + /** Dari */ + Prs = "prs", + /** Portuguese-Brazil */ + PtBR = "pt-BR", + /** Portuguese-Portugal */ + Pt = "pt", + /** Portuguese-Portugal */ + PtPT = "pt-PT", + /** Romanian */ + Ro = "ro", + /** Russian */ + Ru = "ru", + /** Slovak */ + Sk = "sk", + /** Slovenian */ + Sl = "sl", + /** Serbian - Cyrillic RS */ + SrCyrl = "sr-Cyrl", + /** Serbian - Latin RS */ + SrLatn = "sr-Latn", + /** Swedish */ + Sv = "sv", + /** Thai */ + Th = "th", + /** Turkish */ + Tr = "tr", + /** Ukrainian */ + Uk = "uk", + /** Vietnamese */ + Vi = "vi", + /** Chinese Simplified */ + Zh = "zh", + /** Chinese Simplified */ + ZhHans = "zh-Hans", + /** Chinese Traditional */ + ZhHant = "zh-Hant", +} + +/** + * The language codes supported for input by ImageAnalysisSkill. \ + * {@link KnownImageAnalysisSkillLanguage} can be used interchangeably with ImageAnalysisSkillLanguage, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service + * **ar**: Arabic \ + * **az**: Azerbaijani \ + * **bg**: Bulgarian \ + * **bs**: Bosnian Latin \ + * **ca**: Catalan \ + * **cs**: Czech \ + * **cy**: Welsh \ + * **da**: Danish \ + * **de**: German \ + * **el**: Greek \ + * **en**: English \ + * **es**: Spanish \ + * **et**: Estonian \ + * **eu**: Basque \ + * **fi**: Finnish \ + * **fr**: French \ + * **ga**: Irish \ + * **gl**: Galician \ + * **he**: Hebrew \ + * **hi**: Hindi \ + * **hr**: Croatian \ + * **hu**: Hungarian \ + * **id**: Indonesian \ + * **it**: Italian \ + * **ja**: Japanese \ + * **kk**: Kazakh \ + * **ko**: Korean \ + * **lt**: Lithuanian \ + * **lv**: Latvian \ + * **mk**: Macedonian \ + * **ms**: Malay Malaysia \ + * **nb**: Norwegian (Bokmal) \ + * **nl**: Dutch \ + * **pl**: Polish \ + * **prs**: Dari \ + * **pt-BR**: Portuguese-Brazil \ + * **pt**: Portuguese-Portugal \ + * **pt-PT**: Portuguese-Portugal \ + * **ro**: Romanian \ + * **ru**: Russian \ + * **sk**: Slovak \ + * **sl**: Slovenian \ + * **sr-Cyrl**: Serbian - Cyrillic RS \ + * **sr-Latn**: Serbian - Latin RS \ + * **sv**: Swedish \ + * **th**: Thai \ + * **tr**: Turkish \ + * **uk**: Ukrainian \ + * **vi**: Vietnamese \ + * **zh**: Chinese Simplified \ + * **zh-Hans**: Chinese Simplified \ + * **zh-Hant**: Chinese Traditional + */ +export type ImageAnalysisSkillLanguage = string; + +/** The strings indicating what visual feature types to return. */ +export enum KnownVisualFeature { + /** Visual features recognized as adult persons. */ + Adult = "adult", + /** Visual features recognized as commercial brands. */ + Brands = "brands", + /** Categories. */ + Categories = "categories", + /** Description. */ + Description = "description", + /** Visual features recognized as people faces. */ + Faces = "faces", + /** Visual features recognized as objects. */ + Objects = "objects", + /** Tags. */ + Tags = "tags", +} + +/** + * The strings indicating what visual feature types to return. \ + * {@link KnownVisualFeature} can be used interchangeably with VisualFeature, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **adult**: Visual features recognized as adult persons. \ + * **brands**: Visual features recognized as commercial brands. \ + * **categories**: Categories. \ + * **description**: Description. \ + * **faces**: Visual features recognized as people faces. \ + * **objects**: Visual features recognized as objects. \ + * **tags**: Tags. + */ +export type VisualFeature = string; + +/** A string indicating which domain-specific details to return. */ +export enum KnownImageDetail { + /** Details recognized as celebrities. */ + Celebrities = "celebrities", + /** Details recognized as landmarks. */ + Landmarks = "landmarks", +} + +/** + * A string indicating which domain-specific details to return. \ + * {@link KnownImageDetail} can be used interchangeably with ImageDetail, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **celebrities**: Details recognized as celebrities. \ + * **landmarks**: Details recognized as landmarks. + */ +export type ImageDetail = string; + +/** A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. 
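+ *
+ * @example
+ * An illustrative configuration (paths invented for this sketch):
+ * ```ts
+ * const detect: LanguageDetectionSkill = {
+ *   odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill",
+ *   context: "/document",
+ *   defaultCountryHint: "us",
+ *   inputs: [{ name: "text", source: "/document/content" }],
+ *   outputs: [{ name: "languageCode", targetName: "language" }],
+ * };
+ * ```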
*/ +export interface LanguageDetectionSkill extends SearchIndexerSkill { + /** A country code to use as a hint to the language detection model if it cannot disambiguate the language. */ + defaultCountryHint?: string; + /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */ + modelVersion?: string; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill"; +} + +export function languageDetectionSkillSerializer(item: LanguageDetectionSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultCountryHint: item["defaultCountryHint"], + modelVersion: item["modelVersion"], + }; +} + +export function languageDetectionSkillDeserializer(item: any): LanguageDetectionSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultCountryHint: item["defaultCountryHint"], + modelVersion: item["modelVersion"], + }; +} + +/** A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). */ +export interface ShaperSkill extends SearchIndexerSkill { + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Util.ShaperSkill"; +} + +export function shaperSkillSerializer(item: ShaperSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + }; +} + +export function shaperSkillDeserializer(item: any): ShaperSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + }; +} + +/** A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. */ +export interface MergeSkill extends SearchIndexerSkill { + /** The tag indicates the start of the merged text. By default, the tag is an empty space. */ + insertPreTag?: string; + /** The tag indicates the end of the merged text. By default, the tag is an empty space. */ + insertPostTag?: string; + /** A URI fragment specifying the type of skill. 
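+   *
+   * @example
+   * An illustrative merge of extracted text back into document content
+   * (paths invented for this sketch); the pre/post tags pad each inserted
+   * item:
+   * ```ts
+   * const merge: MergeSkill = {
+   *   odatatype: "#Microsoft.Skills.Text.MergeSkill",
+   *   context: "/document",
+   *   insertPreTag: " ",
+   *   insertPostTag: " ",
+   *   inputs: [
+   *     { name: "text", source: "/document/content" },
+   *     { name: "itemsToInsert", source: "/document/imageText" },
+   *     { name: "offsets", source: "/document/imageOffsets" },
+   *   ],
+   *   outputs: [{ name: "mergedText", targetName: "merged_text" }],
+   * };
+   * ```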
*/ + odatatype: "#Microsoft.Skills.Text.MergeSkill"; +} + +export function mergeSkillSerializer(item: MergeSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + insertPreTag: item["insertPreTag"], + insertPostTag: item["insertPostTag"], + }; +} + +export function mergeSkillDeserializer(item: any): MergeSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + insertPreTag: item["insertPreTag"], + insertPostTag: item["insertPostTag"], + }; +} + +/** This skill is deprecated. Use the V3.EntityRecognitionSkill instead. */ +export interface EntityRecognitionSkill extends SearchIndexerSkill { + /** A list of entity categories that should be extracted. */ + categories?: EntityCategory[]; + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: EntityRecognitionSkillLanguage; + /** Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not be surfaced. */ + includeTypelessEntities?: boolean; + /** A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */ + minimumPrecision?: number; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill"; +} + +export function entityRecognitionSkillSerializer(item: EntityRecognitionSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + categories: !item["categories"] + ? item["categories"] + : item["categories"].map((p: any) => { + return p; + }), + defaultLanguageCode: item["defaultLanguageCode"], + includeTypelessEntities: item["includeTypelessEntities"], + minimumPrecision: item["minimumPrecision"], + }; +} + +export function entityRecognitionSkillDeserializer(item: any): EntityRecognitionSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + categories: !item["categories"] + ? item["categories"] + : item["categories"].map((p: any) => { + return p; + }), + defaultLanguageCode: item["defaultLanguageCode"], + includeTypelessEntities: item["includeTypelessEntities"], + minimumPrecision: item["minimumPrecision"], + }; +} + +/** A string indicating what entity categories to return. */ +export enum KnownEntityCategory { + /** Entities describing a physical location. */ + Location = "location", + /** Entities describing an organization. */ + Organization = "organization", + /** Entities describing a person. 
*/ + Person = "person", + /** Entities describing a quantity. */ + Quantity = "quantity", + /** Entities describing a date and time. */ + Datetime = "datetime", + /** Entities describing a URL. */ + Url = "url", + /** Entities describing an email address. */ + Email = "email", +} + +/** + * A string indicating what entity categories to return. \ + * {@link KnownEntityCategory} can be used interchangeably with EntityCategory, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **location**: Entities describing a physical location. \ + * **organization**: Entities describing an organization. \ + * **person**: Entities describing a person. \ + * **quantity**: Entities describing a quantity. \ + * **datetime**: Entities describing a date and time. \ + * **url**: Entities describing a URL. \ + * **email**: Entities describing an email address. + */ +export type EntityCategory = string; + +/** Deprecated. The language codes supported for input text by EntityRecognitionSkill. */ +export enum KnownEntityRecognitionSkillLanguage { + /** Arabic */ + Ar = "ar", + /** Czech */ + Cs = "cs", + /** Chinese-Simplified */ + ZhHans = "zh-Hans", + /** Chinese-Traditional */ + ZhHant = "zh-Hant", + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Hungarian */ + Hu = "hu", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Korean */ + Ko = "ko", + /** Norwegian (Bokmaal) */ + No = "no", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Portuguese (Brazil) */ + PtBR = "pt-BR", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Turkish */ + Tr = "tr", +} + +/** + * Deprecated. The language codes supported for input text by EntityRecognitionSkill. \ + * {@link KnownEntityRecognitionSkillLanguage} can be used interchangeably with EntityRecognitionSkillLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **ar**: Arabic \ + * **cs**: Czech \ + * **zh-Hans**: Chinese-Simplified \ + * **zh-Hant**: Chinese-Traditional \ + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **el**: Greek \ + * **hu**: Hungarian \ + * **it**: Italian \ + * **ja**: Japanese \ + * **ko**: Korean \ + * **no**: Norwegian (Bokmaal) \ + * **pl**: Polish \ + * **pt-PT**: Portuguese (Portugal) \ + * **pt-BR**: Portuguese (Brazil) \ + * **ru**: Russian \ + * **es**: Spanish \ + * **sv**: Swedish \ + * **tr**: Turkish + */ +export type EntityRecognitionSkillLanguage = string; + +/** This skill is deprecated. Use the V3.SentimentSkill instead. */ +export interface SentimentSkill extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: SentimentSkillLanguage; + /** A URI fragment specifying the type of skill. 
*/ + odatatype: "#Microsoft.Skills.Text.SentimentSkill"; +} + +export function sentimentSkillSerializer(item: SentimentSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + }; +} + +export function sentimentSkillDeserializer(item: any): SentimentSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + }; +} + +/** Deprecated. The language codes supported for input text by SentimentSkill. */ +export enum KnownSentimentSkillLanguage { + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Italian */ + It = "it", + /** Norwegian (Bokmaal) */ + No = "no", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Russian */ + Ru = "ru", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Turkish */ + Tr = "tr", +} + +/** + * Deprecated. The language codes supported for input text by SentimentSkill. \ + * {@link KnownSentimentSkillLanguage} can be used interchangeably with SentimentSkillLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **da**: Danish \ + * **nl**: Dutch \ + * **en**: English \ + * **fi**: Finnish \ + * **fr**: French \ + * **de**: German \ + * **el**: Greek \ + * **it**: Italian \ + * **no**: Norwegian (Bokmaal) \ + * **pl**: Polish \ + * **pt-PT**: Portuguese (Portugal) \ + * **ru**: Russian \ + * **es**: Spanish \ + * **sv**: Swedish \ + * **tr**: Turkish + */ +export type SentimentSkillLanguage = string; + +/** Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels (such as "negative", "neutral" and "positive") based on the highest confidence score found by the service at a sentence and document-level. */ +export interface SentimentSkillV3 extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: string; + /** If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. */ + includeOpinionMining?: boolean; + /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */ + modelVersion?: string; + /** A URI fragment specifying the type of skill. 
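+   *
+   * @example
+   * Illustrative sketch (paths invented) enabling opinion mining:
+   * ```ts
+   * const sentiment: SentimentSkillV3 = {
+   *   odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill",
+   *   context: "/document",
+   *   includeOpinionMining: true,
+   *   inputs: [{ name: "text", source: "/document/content" }],
+   *   outputs: [{ name: "sentiment", targetName: "sentimentLabel" }],
+   * };
+   * ```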
*/ + odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill"; +} + +export function sentimentSkillV3Serializer(item: SentimentSkillV3): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + includeOpinionMining: item["includeOpinionMining"], + modelVersion: item["modelVersion"], + }; +} + +export function sentimentSkillV3Deserializer(item: any): SentimentSkillV3 { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + includeOpinionMining: item["includeOpinionMining"], + modelVersion: item["modelVersion"], + }; +} + +/** Using the Text Analytics API, extracts linked entities from text. */ +export interface EntityLinkingSkill extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: string; + /** A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */ + minimumPrecision?: number; + /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */ + modelVersion?: string; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.V3.EntityLinkingSkill"; +} + +export function entityLinkingSkillSerializer(item: EntityLinkingSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + minimumPrecision: item["minimumPrecision"], + modelVersion: item["modelVersion"], + }; +} + +export function entityLinkingSkillDeserializer(item: any): EntityLinkingSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + minimumPrecision: item["minimumPrecision"], + modelVersion: item["modelVersion"], + }; +} + +/** Using the Text Analytics API, extracts entities of different types from text. */ +export interface EntityRecognitionSkillV3 extends SearchIndexerSkill { + /** A list of entity categories that should be extracted. */ + categories?: string[]; + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: string; + /** A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. 
*/ + minimumPrecision?: number; + /** The version of the model to use when calling the Text Analytics API. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */ + modelVersion?: string; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.V3.EntityRecognitionSkill"; +} + +export function entityRecognitionSkillV3Serializer(item: EntityRecognitionSkillV3): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + categories: !item["categories"] + ? item["categories"] + : item["categories"].map((p: any) => { + return p; + }), + defaultLanguageCode: item["defaultLanguageCode"], + minimumPrecision: item["minimumPrecision"], + modelVersion: item["modelVersion"], + }; +} + +export function entityRecognitionSkillV3Deserializer(item: any): EntityRecognitionSkillV3 { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + categories: !item["categories"] + ? item["categories"] + : item["categories"].map((p: any) => { + return p; + }), + defaultLanguageCode: item["defaultLanguageCode"], + minimumPrecision: item["minimumPrecision"], + modelVersion: item["modelVersion"], + }; +} + +/** Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it. */ +export interface PIIDetectionSkill extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: string; + /** A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */ + minimumPrecision?: number; + /** A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. */ + maskingMode?: PIIDetectionSkillMaskingMode; + /** The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'. */ + mask?: string; + /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */ + modelVersion?: string; + /** A list of PII entity categories that should be extracted and masked. */ + piiCategories?: string[]; + /** If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. */ + domain?: string; + /** A URI fragment specifying the type of skill. 
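+   *
+   * @example
+   * Illustrative sketch (values invented). Note the wire rename visible in
+   * the serializer below: the client-side `mask` property travels as
+   * `maskingCharacter` on the wire.
+   * ```ts
+   * const pii: PIIDetectionSkill = {
+   *   odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill",
+   *   context: "/document",
+   *   maskingMode: "replace",
+   *   mask: "*",
+   *   minimumPrecision: 0.5,
+   *   inputs: [{ name: "text", source: "/document/content" }],
+   *   outputs: [{ name: "maskedText", targetName: "redactedText" }],
+   * };
+   * ```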
*/ + odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill"; +} + +export function piiDetectionSkillSerializer(item: PIIDetectionSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + minimumPrecision: item["minimumPrecision"], + maskingMode: item["maskingMode"], + maskingCharacter: item["mask"], + modelVersion: item["modelVersion"], + piiCategories: !item["piiCategories"] + ? item["piiCategories"] + : item["piiCategories"].map((p: any) => { + return p; + }), + domain: item["domain"], + }; +} + +export function piiDetectionSkillDeserializer(item: any): PIIDetectionSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + minimumPrecision: item["minimumPrecision"], + maskingMode: item["maskingMode"], + mask: item["maskingCharacter"], + modelVersion: item["modelVersion"], + piiCategories: !item["piiCategories"] + ? item["piiCategories"] + : item["piiCategories"].map((p: any) => { + return p; + }), + domain: item["domain"], + }; +} + +/** A string indicating what maskingMode to use to mask the personal information detected in the input text. */ +export enum KnownPIIDetectionSkillMaskingMode { + /** No masking occurs and the maskedText output will not be returned. */ + None = "none", + /** Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText. */ + Replace = "replace", +} + +/** + * A string indicating what maskingMode to use to mask the personal information detected in the input text. \ + * {@link KnownPIIDetectionSkillMaskingMode} can be used interchangeably with PIIDetectionSkillMaskingMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **none**: No masking occurs and the maskedText output will not be returned. \ + * **replace**: Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText. + */ +export type PIIDetectionSkillMaskingMode = string; + +/** A skill to split a string into chunks of text. */ +export interface SplitSkill extends SearchIndexerSkill { + /** A value indicating which language code to use. Default is `en`. */ + defaultLanguageCode?: SplitSkillLanguage; + /** A value indicating which split mode to perform. */ + textSplitMode?: TextSplitMode; + /** The desired maximum page length. Default is 10000. */ + maximumPageLength?: number; + /** Only applicable when textSplitMode is set to 'pages'. If specified, n+1th chunk will start with this number of characters/tokens from the end of the nth chunk. */ + pageOverlapLength?: number; + /** Only applicable when textSplitMode is set to 'pages'. 
If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */ + maximumPagesToTake?: number; + /** Only applies if textSplitMode is set to pages. There are two possible values. The choice of the values will decide the length (maximumPageLength and pageOverlapLength) measurement. The default is 'characters', which means the length will be measured by character. */ + unit?: SplitSkillUnit; + /** Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. */ + azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.SplitSkill"; +} + +export function splitSkillSerializer(item: SplitSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + textSplitMode: item["textSplitMode"], + maximumPageLength: item["maximumPageLength"], + pageOverlapLength: item["pageOverlapLength"], + maximumPagesToTake: item["maximumPagesToTake"], + unit: item["unit"], + azureOpenAITokenizerParameters: !item["azureOpenAITokenizerParameters"] + ? item["azureOpenAITokenizerParameters"] + : azureOpenAITokenizerParametersSerializer(item["azureOpenAITokenizerParameters"]), + }; +} + +export function splitSkillDeserializer(item: any): SplitSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultLanguageCode: item["defaultLanguageCode"], + textSplitMode: item["textSplitMode"], + maximumPageLength: item["maximumPageLength"], + pageOverlapLength: item["pageOverlapLength"], + maximumPagesToTake: item["maximumPagesToTake"], + unit: item["unit"], + azureOpenAITokenizerParameters: !item["azureOpenAITokenizerParameters"] + ? item["azureOpenAITokenizerParameters"] + : azureOpenAITokenizerParametersDeserializer(item["azureOpenAITokenizerParameters"]), + }; +} + +/** The language codes supported for input text by SplitSkill. 
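+ *
+ * @example
+ * For orientation, an illustrative page-mode SplitSkill configuration
+ * (values invented for this sketch) using the parameters defined above:
+ * ```ts
+ * const split: SplitSkill = {
+ *   odatatype: "#Microsoft.Skills.Text.SplitSkill",
+ *   context: "/document",
+ *   textSplitMode: "pages",
+ *   maximumPageLength: 2000,
+ *   pageOverlapLength: 200,
+ *   inputs: [{ name: "text", source: "/document/content" }],
+ *   outputs: [{ name: "textItems", targetName: "pages" }],
+ * };
+ * ```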
*/ +export enum KnownSplitSkillLanguage { + /** Amharic */ + Am = "am", + /** Bosnian */ + Bs = "bs", + /** Czech */ + Cs = "cs", + /** Danish */ + Da = "da", + /** German */ + De = "de", + /** English */ + En = "en", + /** Spanish */ + Es = "es", + /** Estonian */ + Et = "et", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** Hebrew */ + He = "he", + /** Hindi */ + Hi = "hi", + /** Croatian */ + Hr = "hr", + /** Hungarian */ + Hu = "hu", + /** Indonesian */ + Id = "id", + /** Icelandic */ + Is = "is", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Korean */ + Ko = "ko", + /** Latvian */ + Lv = "lv", + /** Norwegian */ + Nb = "nb", + /** Dutch */ + Nl = "nl", + /** Polish */ + Pl = "pl", + /** Portuguese (Portugal) */ + Pt = "pt", + /** Portuguese (Brazil) */ + PtBr = "pt-br", + /** Russian */ + Ru = "ru", + /** Slovak */ + Sk = "sk", + /** Slovenian */ + Sl = "sl", + /** Serbian */ + Sr = "sr", + /** Swedish */ + Sv = "sv", + /** Turkish */ + Tr = "tr", + /** Urdu */ + Ur = "ur", + /** Chinese (Simplified) */ + Zh = "zh", +} + +/** + * The language codes supported for input text by SplitSkill. \ + * {@link KnownSplitSkillLanguage} can be used interchangeably with SplitSkillLanguage, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **am**: Amharic \ + * **bs**: Bosnian \ + * **cs**: Czech \ + * **da**: Danish \ + * **de**: German \ + * **en**: English \ + * **es**: Spanish \ + * **et**: Estonian \ + * **fi**: Finnish \ + * **fr**: French \ + * **he**: Hebrew \ + * **hi**: Hindi \ + * **hr**: Croatian \ + * **hu**: Hungarian \ + * **id**: Indonesian \ + * **is**: Icelandic \ + * **it**: Italian \ + * **ja**: Japanese \ + * **ko**: Korean \ + * **lv**: Latvian \ + * **nb**: Norwegian \ + * **nl**: Dutch \ + * **pl**: Polish \ + * **pt**: Portuguese (Portugal) \ + * **pt-br**: Portuguese (Brazil) \ + * **ru**: Russian \ + * **sk**: Slovak \ + * **sl**: Slovenian \ + * **sr**: Serbian \ + * **sv**: Swedish \ + * **tr**: Turkish \ + * **ur**: Urdu \ + * **zh**: Chinese (Simplified) + */ +export type SplitSkillLanguage = string; + +/** A value indicating which split mode to perform. */ +export enum KnownTextSplitMode { + /** Split the text into individual pages. */ + Pages = "pages", + /** Split the text into individual sentences. */ + Sentences = "sentences", +} + +/** + * A value indicating which split mode to perform. \ + * {@link KnownTextSplitMode} can be used interchangeably with TextSplitMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **pages**: Split the text into individual pages. \ + * **sentences**: Split the text into individual sentences. + */ +export type TextSplitMode = string; + +/** A value indicating which unit to use. */ +export enum KnownSplitSkillUnit { + /** The length will be measured by character. */ + Characters = "characters", + /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */ + AzureOpenAITokens = "azureOpenAITokens", +} + +/** + * A value indicating which unit to use. \ + * {@link KnownSplitSkillUnit} can be used interchangeably with SplitSkillUnit, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **characters**: The length will be measured by character. \ + * **azureOpenAITokens**: The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. 
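+ *
+ * @example
+ * Token-based lengths are opted into on the skill itself (illustrative
+ * fragment; `Partial` is used only to keep the sketch short):
+ * ```ts
+ * const tokenUnit: Partial<SplitSkill> = {
+ *   unit: "azureOpenAITokens",
+ *   azureOpenAITokenizerParameters: { encoderModelName: "cl100k_base" },
+ * };
+ * ```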
+ */
+export type SplitSkillUnit = string;
+
+/** Azure OpenAI Tokenizer parameters. */
+export interface AzureOpenAITokenizerParameters {
+  /** Only applies if the unit is set to azureOpenAITokens. Options include 'r50k_base', 'p50k_base', 'p50k_edit' and 'cl100k_base'. The default value is 'cl100k_base'. */
+  encoderModelName?: SplitSkillEncoderModelName;
+  /** (Optional) Only applies if the unit is set to azureOpenAITokens. This parameter defines a collection of special tokens that are permitted within the tokenization process. */
+  allowedSpecialTokens?: string[];
+}
+
+export function azureOpenAITokenizerParametersSerializer(
+  item: AzureOpenAITokenizerParameters,
+): any {
+  return {
+    encoderModelName: item["encoderModelName"],
+    allowedSpecialTokens: !item["allowedSpecialTokens"]
+      ? item["allowedSpecialTokens"]
+      : item["allowedSpecialTokens"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function azureOpenAITokenizerParametersDeserializer(
+  item: any,
+): AzureOpenAITokenizerParameters {
+  return {
+    encoderModelName: item["encoderModelName"],
+    allowedSpecialTokens: !item["allowedSpecialTokens"]
+      ? item["allowedSpecialTokens"]
+      : item["allowedSpecialTokens"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+/** A value indicating which tokenizer to use. */
+export enum KnownSplitSkillEncoderModelName {
+  /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */
+  R50KBase = "r50k_base",
+  /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */
+  P50KBase = "p50k_base",
+  /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */
+  P50KEdit = "p50k_edit",
+  /** A base model with a 100,000 token vocabulary. */
+  CL100KBase = "cl100k_base",
+}
+
+/**
+ * A value indicating which tokenizer to use. \
+ * {@link KnownSplitSkillEncoderModelName} can be used interchangeably with SplitSkillEncoderModelName,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **r50k_base**: Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. \
+ * **p50k_base**: A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. \
+ * **p50k_edit**: Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. \
+ * **cl100k_base**: A base model with a 100,000 token vocabulary.
+ */
+export type SplitSkillEncoderModelName = string;
+
+/** A skill that looks for text from a custom, user-defined list of words and phrases. */
+export interface CustomEntityLookupSkill extends SearchIndexerSkill {
+  /** A value indicating which language code to use. Default is `en`. */
+  defaultLanguageCode?: CustomEntityLookupSkillLanguage;
+  /** Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. */
+  entitiesDefinitionUri?: string;
+  /** The inline CustomEntity definition. */
+  inlineEntitiesDefinition?: CustomEntity[];
+  /** A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, this value will be the default value. */
+  globalDefaultCaseSensitive?: boolean;
+  /** A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. */
+  globalDefaultAccentSensitive?: boolean;
+  /** A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. */
+  globalDefaultFuzzyEditDistance?: number;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill";
+}
+
+export function customEntityLookupSkillSerializer(item: CustomEntityLookupSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    entitiesDefinitionUri: item["entitiesDefinitionUri"],
+    inlineEntitiesDefinition: !item["inlineEntitiesDefinition"]
+      ? item["inlineEntitiesDefinition"]
+      : customEntityArraySerializer(item["inlineEntitiesDefinition"]),
+    globalDefaultCaseSensitive: item["globalDefaultCaseSensitive"],
+    globalDefaultAccentSensitive: item["globalDefaultAccentSensitive"],
+    globalDefaultFuzzyEditDistance: item["globalDefaultFuzzyEditDistance"],
+  };
+}
+
+export function customEntityLookupSkillDeserializer(item: any): CustomEntityLookupSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    defaultLanguageCode: item["defaultLanguageCode"],
+    entitiesDefinitionUri: item["entitiesDefinitionUri"],
+    inlineEntitiesDefinition: !item["inlineEntitiesDefinition"]
+      ? item["inlineEntitiesDefinition"]
+      : customEntityArrayDeserializer(item["inlineEntitiesDefinition"]),
+    globalDefaultCaseSensitive: item["globalDefaultCaseSensitive"],
+    globalDefaultAccentSensitive: item["globalDefaultAccentSensitive"],
+    globalDefaultFuzzyEditDistance: item["globalDefaultFuzzyEditDistance"],
+  };
+}
+
+/** The language codes supported for input text by CustomEntityLookupSkill. */
+export enum KnownCustomEntityLookupSkillLanguage {
+  /** Danish */
+  Da = "da",
+  /** German */
+  De = "de",
+  /** English */
+  En = "en",
+  /** Spanish */
+  Es = "es",
+  /** Finnish */
+  Fi = "fi",
+  /** French */
+  Fr = "fr",
+  /** Italian */
+  It = "it",
+  /** Korean */
+  Ko = "ko",
+  /** Portuguese */
+  Pt = "pt",
+}
+
+/**
+ * The language codes supported for input text by CustomEntityLookupSkill. \
+ * {@link KnownCustomEntityLookupSkillLanguage} can be used interchangeably with CustomEntityLookupSkillLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **da**: Danish \
+ * **de**: German \
+ * **en**: English \
+ * **es**: Spanish \
+ * **fi**: Finnish \
+ * **fr**: French \
+ * **it**: Italian \
+ * **ko**: Korean \
+ * **pt**: Portuguese
+ */
+export type CustomEntityLookupSkillLanguage = string;
+
+export function customEntityArraySerializer(result: Array<CustomEntity>): any[] {
+  return result.map((item) => {
+    return customEntitySerializer(item);
+  });
+}
+
+export function customEntityArrayDeserializer(result: Array<CustomEntity>): any[] {
+  return result.map((item) => {
+    return customEntityDeserializer(item);
+  });
+}
+
+/** An object that contains information about the matches that were found, and related metadata.
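+ */
+
+// Inline-dictionary sketch (editorial): a CustomEntityLookupSkill can carry its
+// entity list inline via inlineEntitiesDefinition instead of pointing at a file
+// with entitiesDefinitionUri. Input/output mapping shapes are assumed from the
+// Azure AI Search REST API.
+//
+//   const lookup: CustomEntityLookupSkill = {
+//     odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill",
+//     inputs: [{ name: "text", source: "/document/content" }],
+//     outputs: [{ name: "entities", targetName: "matchedEntities" }],
+//     inlineEntitiesDefinition: [
+//       {
+//         name: "Microsoft",
+//         aliases: [{ text: "MSFT", caseSensitive: true }],
+//         defaultFuzzyEditDistance: 1,
+//       },
+//     ],
+//   };
+//   const payload = customEntityLookupSkillSerializer(lookup);
+
+/** An object that contains information about the matches that were found, and related metadata.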
 */
+export interface CustomEntity {
+  /** The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the "normalized" form of the text being found. */
+  name: string;
+  /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
+  description?: string;
+  /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
+  type?: string;
+  /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
+  subtype?: string;
+  /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
+  id?: string;
+  /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case insensitive matches of "Microsoft" could be: microsoft, microSoft, MICROSOFT. */
+  caseSensitive?: boolean;
+  /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to accent. */
+  accentSensitive?: boolean;
+  /** Defaults to 0. Maximum value of 5. Denotes the acceptable number of divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. For instance, if the edit distance is set to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do. */
+  fuzzyEditDistance?: number;
+  /** Changes the default case sensitivity value for this entity. It can be used to change the default value of all aliases caseSensitive values. */
+  defaultCaseSensitive?: boolean;
+  /** Changes the default accent sensitivity value for this entity. It can be used to change the default value of all aliases accentSensitive values. */
+  defaultAccentSensitive?: boolean;
+  /** Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases fuzzyEditDistance values. */
+  defaultFuzzyEditDistance?: number;
+  /** An array of complex objects that can be used to specify alternative spellings or synonyms to the root entity name. */
+  aliases?: CustomEntityAlias[];
+}
+
+export function customEntitySerializer(item: CustomEntity): any {
+  return {
+    name: item["name"],
+    description: item["description"],
+    type: item["type"],
+    subtype: item["subtype"],
+    id: item["id"],
+    caseSensitive: item["caseSensitive"],
+    accentSensitive: item["accentSensitive"],
+    fuzzyEditDistance: item["fuzzyEditDistance"],
+    defaultCaseSensitive: item["defaultCaseSensitive"],
+    defaultAccentSensitive: item["defaultAccentSensitive"],
+    defaultFuzzyEditDistance: item["defaultFuzzyEditDistance"],
+    aliases: !item["aliases"]
item["aliases"] : customEntityAliasArraySerializer(item["aliases"]), + }; +} + +export function customEntityDeserializer(item: any): CustomEntity { + return { + name: item["name"], + description: item["description"], + type: item["type"], + subtype: item["subtype"], + id: item["id"], + caseSensitive: item["caseSensitive"], + accentSensitive: item["accentSensitive"], + fuzzyEditDistance: item["fuzzyEditDistance"], + defaultCaseSensitive: item["defaultCaseSensitive"], + defaultAccentSensitive: item["defaultAccentSensitive"], + defaultFuzzyEditDistance: item["defaultFuzzyEditDistance"], + aliases: !item["aliases"] + ? item["aliases"] + : customEntityAliasArrayDeserializer(item["aliases"]), + }; +} + +export function customEntityAliasArraySerializer(result: Array): any[] { + return result.map((item) => { + return customEntityAliasSerializer(item); + }); +} + +export function customEntityAliasArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return customEntityAliasDeserializer(item); + }); +} + +/** A complex object that can be used to specify alternative spellings or synonyms to the root entity name. */ +export interface CustomEntityAlias { + /** The text of the alias. */ + text: string; + /** Determine if the alias is case sensitive. */ + caseSensitive?: boolean; + /** Determine if the alias is accent sensitive. */ + accentSensitive?: boolean; + /** Determine the fuzzy edit distance of the alias. */ + fuzzyEditDistance?: number; +} + +export function customEntityAliasSerializer(item: CustomEntityAlias): any { + return { + text: item["text"], + caseSensitive: item["caseSensitive"], + accentSensitive: item["accentSensitive"], + fuzzyEditDistance: item["fuzzyEditDistance"], + }; +} + +export function customEntityAliasDeserializer(item: any): CustomEntityAlias { + return { + text: item["text"], + caseSensitive: item["caseSensitive"], + accentSensitive: item["accentSensitive"], + fuzzyEditDistance: item["fuzzyEditDistance"], + }; +} + +/** A skill to translate text from one language to another. */ +export interface TextTranslationSkill extends SearchIndexerSkill { + /** The language code to translate documents into for documents that don't specify the to language explicitly. */ + defaultToLanguageCode: TextTranslationSkillLanguage; + /** The language code to translate documents from for documents that don't specify the from language explicitly. */ + defaultFromLanguageCode?: TextTranslationSkillLanguage; + /** The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is `en`. */ + suggestedFrom?: TextTranslationSkillLanguage; + /** A URI fragment specifying the type of skill. 
*/ + odatatype: "#Microsoft.Skills.Text.TranslationSkill"; +} + +export function textTranslationSkillSerializer(item: TextTranslationSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + defaultToLanguageCode: item["defaultToLanguageCode"], + defaultFromLanguageCode: item["defaultFromLanguageCode"], + suggestedFrom: item["suggestedFrom"], + }; +} + +export function textTranslationSkillDeserializer(item: any): TextTranslationSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + defaultToLanguageCode: item["defaultToLanguageCode"], + defaultFromLanguageCode: item["defaultFromLanguageCode"], + suggestedFrom: item["suggestedFrom"], + }; +} + +/** The language codes supported for input text by TextTranslationSkill. */ +export enum KnownTextTranslationSkillLanguage { + /** Afrikaans */ + Af = "af", + /** Arabic */ + Ar = "ar", + /** Bangla */ + Bn = "bn", + /** Bosnian (Latin) */ + Bs = "bs", + /** Bulgarian */ + Bg = "bg", + /** Cantonese (Traditional) */ + Yue = "yue", + /** Catalan */ + Ca = "ca", + /** Chinese Simplified */ + ZhHans = "zh-Hans", + /** Chinese Traditional */ + ZhHant = "zh-Hant", + /** Croatian */ + Hr = "hr", + /** Czech */ + Cs = "cs", + /** Danish */ + Da = "da", + /** Dutch */ + Nl = "nl", + /** English */ + En = "en", + /** Estonian */ + Et = "et", + /** Fijian */ + Fj = "fj", + /** Filipino */ + Fil = "fil", + /** Finnish */ + Fi = "fi", + /** French */ + Fr = "fr", + /** German */ + De = "de", + /** Greek */ + El = "el", + /** Haitian Creole */ + Ht = "ht", + /** Hebrew */ + He = "he", + /** Hindi */ + Hi = "hi", + /** Hmong Daw */ + Mww = "mww", + /** Hungarian */ + Hu = "hu", + /** Icelandic */ + Is = "is", + /** Indonesian */ + Id = "id", + /** Italian */ + It = "it", + /** Japanese */ + Ja = "ja", + /** Kiswahili */ + Sw = "sw", + /** Klingon */ + Tlh = "tlh", + /** Klingon (Latin script) */ + TlhLatn = "tlh-Latn", + /** Klingon (Klingon script) */ + TlhPiqd = "tlh-Piqd", + /** Korean */ + Ko = "ko", + /** Latvian */ + Lv = "lv", + /** Lithuanian */ + Lt = "lt", + /** Malagasy */ + Mg = "mg", + /** Malay */ + Ms = "ms", + /** Maltese */ + Mt = "mt", + /** Norwegian */ + Nb = "nb", + /** Persian */ + Fa = "fa", + /** Polish */ + Pl = "pl", + /** Portuguese */ + Pt = "pt", + /** Portuguese (Brazil) */ + PtBr = "pt-br", + /** Portuguese (Portugal) */ + PtPT = "pt-PT", + /** Queretaro Otomi */ + Otq = "otq", + /** Romanian */ + Ro = "ro", + /** Russian */ + Ru = "ru", + /** Samoan */ + Sm = "sm", + /** Serbian (Cyrillic) */ + SrCyrl = "sr-Cyrl", + /** Serbian (Latin) */ + SrLatn = "sr-Latn", + /** Slovak */ + Sk = "sk", + /** Slovenian */ + Sl = "sl", + /** Spanish */ + Es = "es", + /** Swedish */ + Sv = "sv", + /** Tahitian */ + Ty = "ty", + /** Tamil */ + Ta = "ta", + /** Telugu */ + Te = "te", + /** Thai */ + Th = "th", + /** Tongan */ + To = "to", + /** Turkish */ + Tr = "tr", + /** Ukrainian */ + Uk = "uk", + /** Urdu */ + Ur = "ur", + /** Vietnamese */ + Vi = "vi", + /** Welsh */ + Cy = "cy", + /** Yucatec Maya */ + Yua = "yua", + /** Irish */ + Ga = "ga", + /** Kannada */ + Kn = "kn", + /** Maori */ + Mi = "mi", + /** 
Malayalam */
+  Ml = "ml",
+  /** Punjabi */
+  Pa = "pa",
+}
+
+/**
+ * The language codes supported for input text by TextTranslationSkill. \
+ * {@link KnownTextTranslationSkillLanguage} can be used interchangeably with TextTranslationSkillLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **af**: Afrikaans \
+ * **ar**: Arabic \
+ * **bn**: Bangla \
+ * **bs**: Bosnian (Latin) \
+ * **bg**: Bulgarian \
+ * **yue**: Cantonese (Traditional) \
+ * **ca**: Catalan \
+ * **zh-Hans**: Chinese Simplified \
+ * **zh-Hant**: Chinese Traditional \
+ * **hr**: Croatian \
+ * **cs**: Czech \
+ * **da**: Danish \
+ * **nl**: Dutch \
+ * **en**: English \
+ * **et**: Estonian \
+ * **fj**: Fijian \
+ * **fil**: Filipino \
+ * **fi**: Finnish \
+ * **fr**: French \
+ * **de**: German \
+ * **el**: Greek \
+ * **ht**: Haitian Creole \
+ * **he**: Hebrew \
+ * **hi**: Hindi \
+ * **mww**: Hmong Daw \
+ * **hu**: Hungarian \
+ * **is**: Icelandic \
+ * **id**: Indonesian \
+ * **it**: Italian \
+ * **ja**: Japanese \
+ * **sw**: Kiswahili \
+ * **tlh**: Klingon \
+ * **tlh-Latn**: Klingon (Latin script) \
+ * **tlh-Piqd**: Klingon (Klingon script) \
+ * **ko**: Korean \
+ * **lv**: Latvian \
+ * **lt**: Lithuanian \
+ * **mg**: Malagasy \
+ * **ms**: Malay \
+ * **mt**: Maltese \
+ * **nb**: Norwegian \
+ * **fa**: Persian \
+ * **pl**: Polish \
+ * **pt**: Portuguese \
+ * **pt-br**: Portuguese (Brazil) \
+ * **pt-PT**: Portuguese (Portugal) \
+ * **otq**: Queretaro Otomi \
+ * **ro**: Romanian \
+ * **ru**: Russian \
+ * **sm**: Samoan \
+ * **sr-Cyrl**: Serbian (Cyrillic) \
+ * **sr-Latn**: Serbian (Latin) \
+ * **sk**: Slovak \
+ * **sl**: Slovenian \
+ * **es**: Spanish \
+ * **sv**: Swedish \
+ * **ty**: Tahitian \
+ * **ta**: Tamil \
+ * **te**: Telugu \
+ * **th**: Thai \
+ * **to**: Tongan \
+ * **tr**: Turkish \
+ * **uk**: Ukrainian \
+ * **ur**: Urdu \
+ * **vi**: Vietnamese \
+ * **cy**: Welsh \
+ * **yua**: Yucatec Maya \
+ * **ga**: Irish \
+ * **kn**: Kannada \
+ * **mi**: Maori \
+ * **ml**: Malayalam \
+ * **pa**: Punjabi
+ */
+export type TextTranslationSkillLanguage = string;
+
+/** A skill that extracts content from a file within the enrichment pipeline. */
+export interface DocumentExtractionSkill extends SearchIndexerSkill {
+  /** The parsingMode for the skill. Will be set to 'default' if not defined. */
+  parsingMode?: string;
+  /** The type of data to be extracted for the skill. Will be set to 'contentAndMetadata' if not defined. */
+  dataToExtract?: string;
+  /** A dictionary of configurations for the skill. */
+  configuration?: Record<string, any>;
+  /** A URI fragment specifying the type of skill. */
*/ + odatatype: "#Microsoft.Skills.Util.DocumentExtractionSkill"; +} + +export function documentExtractionSkillSerializer(item: DocumentExtractionSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + parsingMode: item["parsingMode"], + dataToExtract: item["dataToExtract"], + configuration: item["configuration"], + }; +} + +export function documentExtractionSkillDeserializer(item: any): DocumentExtractionSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + parsingMode: item["parsingMode"], + dataToExtract: item["dataToExtract"], + configuration: item["configuration"], + }; +} + +/** A skill that extracts content and layout information, via Azure AI Services, from files within the enrichment pipeline. */ +export interface DocumentIntelligenceLayoutSkill extends SearchIndexerSkill { + /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */ + outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat; + /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */ + outputMode?: DocumentIntelligenceLayoutSkillOutputMode; + /** The depth of headers in the markdown output. Default is h6. */ + markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth; + /** Controls the cardinality of the content extracted from the document by the skill. */ + extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[]; + /** Controls the cardinality for chunking the content. */ + chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill"; +} + +export function documentIntelligenceLayoutSkillSerializer( + item: DocumentIntelligenceLayoutSkill, +): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + outputFormat: item["outputFormat"], + outputMode: item["outputMode"], + markdownHeaderDepth: item["markdownHeaderDepth"], + extractionOptions: !item["extractionOptions"] + ? item["extractionOptions"] + : item["extractionOptions"].map((p: any) => { + return p; + }), + chunkingProperties: !item["chunkingProperties"] + ? item["chunkingProperties"] + : documentIntelligenceLayoutSkillChunkingPropertiesSerializer(item["chunkingProperties"]), + }; +} + +export function documentIntelligenceLayoutSkillDeserializer( + item: any, +): DocumentIntelligenceLayoutSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + outputFormat: item["outputFormat"], + outputMode: item["outputMode"], + markdownHeaderDepth: item["markdownHeaderDepth"], + extractionOptions: !item["extractionOptions"] + ? 
item["extractionOptions"] + : item["extractionOptions"].map((p: any) => { + return p; + }), + chunkingProperties: !item["chunkingProperties"] + ? item["chunkingProperties"] + : documentIntelligenceLayoutSkillChunkingPropertiesDeserializer(item["chunkingProperties"]), + }; +} + +/** Controls the cardinality of the output format. Default is 'markdown'. */ +export enum KnownDocumentIntelligenceLayoutSkillOutputFormat { + /** Specify the format of the output as text. */ + Text = "text", + /** Specify the format of the output as markdown. */ + Markdown = "markdown", +} + +/** + * Controls the cardinality of the output format. Default is 'markdown'. \ + * {@link KnownDocumentIntelligenceLayoutSkillOutputFormat} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputFormat, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **text**: Specify the format of the output as text. \ + * **markdown**: Specify the format of the output as markdown. + */ +export type DocumentIntelligenceLayoutSkillOutputFormat = string; + +/** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */ +export enum KnownDocumentIntelligenceLayoutSkillOutputMode { + /** Specify that the output should be parsed as 'oneToMany'. */ + OneToMany = "oneToMany", +} + +/** + * Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. \ + * {@link KnownDocumentIntelligenceLayoutSkillOutputMode} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **oneToMany**: Specify that the output should be parsed as 'oneToMany'. + */ +export type DocumentIntelligenceLayoutSkillOutputMode = string; + +/** The depth of headers in the markdown output. Default is h6. */ +export enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth { + /** Header level 1. */ + H1 = "h1", + /** Header level 2. */ + H2 = "h2", + /** Header level 3. */ + H3 = "h3", + /** Header level 4. */ + H4 = "h4", + /** Header level 5. */ + H5 = "h5", + /** Header level 6. */ + H6 = "h6", +} + +/** + * The depth of headers in the markdown output. Default is h6. \ + * {@link KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth} can be used interchangeably with DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **h1**: Header level 1. \ + * **h2**: Header level 2. \ + * **h3**: Header level 3. \ + * **h4**: Header level 4. \ + * **h5**: Header level 5. \ + * **h6**: Header level 6. + */ +export type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string; + +/** Controls the cardinality of the content extracted from the document by the skill. */ +export enum KnownDocumentIntelligenceLayoutSkillExtractionOptions { + /** Specify that image content should be extracted from the document. */ + Images = "images", + /** Specify that location metadata should be extracted from the document. */ + LocationMetadata = "locationMetadata", +} + +/** + * Controls the cardinality of the content extracted from the document by the skill. \ + * {@link KnownDocumentIntelligenceLayoutSkillExtractionOptions} can be used interchangeably with DocumentIntelligenceLayoutSkillExtractionOptions, + * this enum contains the known values that the service supports. 
+ * ### Known values supported by the service
+ * **images**: Specify that image content should be extracted from the document. \
+ * **locationMetadata**: Specify that location metadata should be extracted from the document.
+ */
+export type DocumentIntelligenceLayoutSkillExtractionOptions = string;
+
+/** Controls the cardinality for chunking the content. */
+export interface DocumentIntelligenceLayoutSkillChunkingProperties {
+  /** The unit of the chunk. */
+  unit?: DocumentIntelligenceLayoutSkillChunkingUnit;
+  /** The maximum chunk length in characters. Default is 500. */
+  maximumLength?: number;
+  /** The length of overlap provided between two text chunks. Default is 0. */
+  overlapLength?: number;
+}
+
+export function documentIntelligenceLayoutSkillChunkingPropertiesSerializer(
+  item: DocumentIntelligenceLayoutSkillChunkingProperties,
+): any {
+  return {
+    unit: item["unit"],
+    maximumLength: item["maximumLength"],
+    overlapLength: item["overlapLength"],
+  };
+}
+
+export function documentIntelligenceLayoutSkillChunkingPropertiesDeserializer(
+  item: any,
+): DocumentIntelligenceLayoutSkillChunkingProperties {
+  return {
+    unit: item["unit"],
+    maximumLength: item["maximumLength"],
+    overlapLength: item["overlapLength"],
+  };
+}
+
+/** Controls the cardinality of the chunk unit. Default is 'characters' */
+export enum KnownDocumentIntelligenceLayoutSkillChunkingUnit {
+  /** Specifies chunk by characters. */
+  Characters = "characters",
+}
+
+/**
+ * Controls the cardinality of the chunk unit. Default is 'characters' \
+ * {@link KnownDocumentIntelligenceLayoutSkillChunkingUnit} can be used interchangeably with DocumentIntelligenceLayoutSkillChunkingUnit,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **characters**: Specifies chunk by characters.
+ */
+export type DocumentIntelligenceLayoutSkillChunkingUnit = string;
+
+/** A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. */
+export interface WebApiSkill extends SearchIndexerSkill {
+  /** The url for the Web API. */
+  uri: string;
+  /** The headers required to make the http request. */
+  httpHeaders?: Record<string, string>;
+  /** The method for the http request. */
+  httpMethod?: string;
+  /** The desired timeout for the request. Default is 30 seconds. */
+  timeout?: string;
+  /** The desired batch size which indicates number of documents. */
+  batchSize?: number;
+  /** If set, the number of parallel calls that can be made to the Web API. */
+  degreeOfParallelism?: number;
+  /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the custom skill connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */
+  authResourceId?: string;
+  /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared.
*/ + authIdentity?: SearchIndexerDataIdentityUnion; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Custom.WebApiSkill"; +} + +export function webApiSkillSerializer(item: WebApiSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + uri: item["uri"], + httpHeaders: item["httpHeaders"], + httpMethod: item["httpMethod"], + timeout: item["timeout"], + batchSize: item["batchSize"], + degreeOfParallelism: item["degreeOfParallelism"], + authResourceId: item["authResourceId"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]), + }; +} + +export function webApiSkillDeserializer(item: any): WebApiSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + uri: item["uri"], + httpHeaders: item["httpHeaders"], + httpMethod: item["httpMethod"], + timeout: item["timeout"], + batchSize: item["batchSize"], + degreeOfParallelism: item["degreeOfParallelism"], + authResourceId: item["authResourceId"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]), + }; +} + +/** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */ +export interface AzureMachineLearningSkill extends SearchIndexerSkill { + /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */ + scoringUri?: string; + /** (Required for key authentication) The key for the AML service. */ + authenticationKey?: string; + /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */ + resourceId?: string; + /** (Optional) When specified, indicates the timeout for the http client making the API call. */ + timeout?: string; + /** (Optional for token authentication). The region the AML service is deployed in. */ + region?: string; + /** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */ + degreeOfParallelism?: number; + /** A URI fragment specifying the type of skill. 
*/ + odatatype: "#Microsoft.Skills.Custom.AmlSkill"; +} + +export function azureMachineLearningSkillSerializer(item: AzureMachineLearningSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + uri: item["scoringUri"], + key: item["authenticationKey"], + resourceId: item["resourceId"], + timeout: item["timeout"], + region: item["region"], + degreeOfParallelism: item["degreeOfParallelism"], + }; +} + +export function azureMachineLearningSkillDeserializer(item: any): AzureMachineLearningSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + scoringUri: item["uri"], + authenticationKey: item["key"], + resourceId: item["resourceId"], + timeout: item["timeout"], + region: item["region"], + degreeOfParallelism: item["degreeOfParallelism"], + }; +} + +/** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */ +export interface AzureOpenAIEmbeddingSkill extends SearchIndexerSkill { + /** The resource URI of the Azure OpenAI resource. */ + resourceUrl?: string; + /** ID of the Azure OpenAI model deployment on the designated resource. */ + deploymentName?: string; + /** API key of the designated Azure OpenAI resource. */ + apiKey?: string; + /** The user-assigned managed identity used for outbound connections. */ + authIdentity?: SearchIndexerDataIdentityUnion; + /** The name of the embedding model that is deployed at the provided deploymentId path. */ + modelName?: AzureOpenAIModelName; + /** The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. */ + dimensions?: number; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"; +} + +export function azureOpenAIEmbeddingSkillSerializer(item: AzureOpenAIEmbeddingSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + resourceUri: item["resourceUrl"], + deploymentId: item["deploymentName"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? item["authIdentity"] + : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]), + modelName: item["modelName"], + dimensions: item["dimensions"], + }; +} + +export function azureOpenAIEmbeddingSkillDeserializer(item: any): AzureOpenAIEmbeddingSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + resourceUrl: item["resourceUri"], + deploymentName: item["deploymentId"], + apiKey: item["apiKey"], + authIdentity: !item["authIdentity"] + ? 
item["authIdentity"] + : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]), + modelName: item["modelName"], + dimensions: item["dimensions"], + }; +} + +/** Allows you to generate a vector embedding for a given image or text input using the Azure AI Services Vision Vectorize API. */ +export interface VisionVectorizeSkill extends SearchIndexerSkill { + /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */ + modelVersion: string; + /** A URI fragment specifying the type of skill. */ + odatatype: "#Microsoft.Skills.Vision.VectorizeSkill"; +} + +export function visionVectorizeSkillSerializer(item: VisionVectorizeSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + modelVersion: item["modelVersion"], + }; +} + +export function visionVectorizeSkillDeserializer(item: any): VisionVectorizeSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + modelVersion: item["modelVersion"], + }; +} + +/** A skill that leverages Azure AI Content Understanding to process and extract structured insights from documents, enabling enriched, searchable content for enhanced document indexing and retrieval. */ +export interface ContentUnderstandingSkill extends SearchIndexerSkill { + /** Controls the cardinality of the content extracted from the document by the skill. */ + extractionOptions?: ContentUnderstandingSkillExtractionOptions[]; + /** Controls the cardinality for chunking the content. */ + chunkingProperties?: ContentUnderstandingSkillChunkingProperties; + /** A URI fragment specifying the type of skill. */ + odataType: "#Microsoft.Skills.Util.ContentUnderstandingSkill"; +} + +export function contentUnderstandingSkillSerializer(item: ContentUnderstandingSkill): any { + return { + "@odata.type": item["odatatype"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArraySerializer(item["inputs"]), + outputs: outputFieldMappingEntryArraySerializer(item["outputs"]), + extractionOptions: !item["extractionOptions"] + ? item["extractionOptions"] + : item["extractionOptions"].map((p: any) => { + return p; + }), + chunkingProperties: !item["chunkingProperties"] + ? item["chunkingProperties"] + : contentUnderstandingSkillChunkingPropertiesSerializer(item["chunkingProperties"]), + }; +} + +export function contentUnderstandingSkillDeserializer(item: any): ContentUnderstandingSkill { + return { + odatatype: item["@odata.type"], + name: item["name"], + description: item["description"], + context: item["context"], + inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]), + outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]), + extractionOptions: !item["extractionOptions"] + ? item["extractionOptions"] + : item["extractionOptions"].map((p: any) => { + return p; + }), + chunkingProperties: !item["chunkingProperties"] + ? 
item["chunkingProperties"] + : contentUnderstandingSkillChunkingPropertiesDeserializer(item["chunkingProperties"]), + odataType: item["@odata.type"], + }; +} + +/** Controls the cardinality of the content extracted from the document by the skill. */ +export enum KnownContentUnderstandingSkillExtractionOptions { + /** Specify that image content should be extracted from the document. */ + Images = "images", + /** Specify that location metadata should be extracted from the document. */ + LocationMetadata = "locationMetadata", +} + +/** + * Controls the cardinality of the content extracted from the document by the skill. \ + * {@link KnownContentUnderstandingSkillExtractionOptions} can be used interchangeably with ContentUnderstandingSkillExtractionOptions, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **images**: Specify that image content should be extracted from the document. \ + * **locationMetadata**: Specify that location metadata should be extracted from the document. + */ +export type ContentUnderstandingSkillExtractionOptions = string; + +/** Controls the cardinality for chunking the content. */ +export interface ContentUnderstandingSkillChunkingProperties { + /** The unit of the chunk. */ + unit?: ContentUnderstandingSkillChunkingUnit; + /** The maximum chunk length in characters. Default is 500. */ + maximumLength?: number; + /** The length of overlap provided between two text chunks. Default is 0. */ + overlapLength?: number; +} + +export function contentUnderstandingSkillChunkingPropertiesSerializer( + item: ContentUnderstandingSkillChunkingProperties, +): any { + return { + unit: item["unit"], + maximumLength: item["maximumLength"], + overlapLength: item["overlapLength"], + }; +} + +export function contentUnderstandingSkillChunkingPropertiesDeserializer( + item: any, +): ContentUnderstandingSkillChunkingProperties { + return { + unit: item["unit"], + maximumLength: item["maximumLength"], + overlapLength: item["overlapLength"], + }; +} + +/** Controls the cardinality of the chunk unit. Default is 'characters' */ +export enum KnownContentUnderstandingSkillChunkingUnit { + /** Specifies chunk by characters. */ + Characters = "characters", +} + +/** + * Controls the cardinality of the chunk unit. Default is 'characters' \ + * {@link KnownContentUnderstandingSkillChunkingUnit} can be used interchangeably with ContentUnderstandingSkillChunkingUnit, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **characters**: Specifies chunk by characters. + */ +export type ContentUnderstandingSkillChunkingUnit = string; + +/** A skill that calls a language model via Azure AI Foundry's Chat Completions endpoint. */ +export interface ChatCompletionSkill extends SearchIndexerSkill { + /** The url for the Web API. */ + uri: string; + /** The headers required to make the http request. */ + httpHeaders?: WebApiHttpHeaders; + /** The method for the http request. */ + httpMethod?: string; + /** The desired timeout for the request. Default is 30 seconds. */ + timeout?: string; + /** The desired batch size which indicates number of documents. */ + batchSize?: number; + /** If set, the number of parallel calls that can be made to the Web API. */ + degreeOfParallelism?: number; + /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. 
 */
+  authResourceId?: string;
+  /** The user-assigned managed identity used for outbound connections. */
+  authIdentity?: SearchIndexerDataIdentityUnion;
+  /** API key for authenticating to the model. Both apiKey and authIdentity cannot be specified at the same time. */
+  apiKey?: string;
+  /** Common language model parameters that customers can tweak. If omitted, reasonable defaults will be applied. */
+  commonModelParameters?: CommonModelParameters;
+  /** Open-type dictionary for model-specific parameters that should be appended to the chat completions call. Follows Azure AI Foundry's extensibility pattern. */
+  extraParameters?: Record<string, any>;
+  /** How extra parameters are handled by Azure AI Foundry. Default is 'error'. */
+  extraParametersBehavior?: ChatCompletionExtraParametersBehavior;
+  /** Determines how the LLM should format its response. Defaults to 'text' response type. */
+  responseFormat?: ChatCompletionResponseFormat;
+  /** A URI fragment specifying the type of skill. */
+  odatatype: "#Microsoft.Skills.Custom.ChatCompletionSkill";
+}
+
+export function chatCompletionSkillSerializer(item: ChatCompletionSkill): any {
+  return {
+    "@odata.type": item["odatatype"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArraySerializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArraySerializer(item["outputs"]),
+    uri: item["uri"],
+    httpHeaders: !item["httpHeaders"]
+      ? item["httpHeaders"]
+      : webApiHttpHeadersSerializer(item["httpHeaders"]),
+    httpMethod: item["httpMethod"],
+    timeout: item["timeout"],
+    batchSize: item["batchSize"],
+    degreeOfParallelism: item["degreeOfParallelism"],
+    authResourceId: item["authResourceId"],
+    authIdentity: !item["authIdentity"]
+      ? item["authIdentity"]
+      : searchIndexerDataIdentityUnionSerializer(item["authIdentity"]),
+    apiKey: item["apiKey"],
+    commonModelParameters: !item["commonModelParameters"]
+      ? item["commonModelParameters"]
+      : commonModelParametersSerializer(item["commonModelParameters"]),
+    extraParameters: item["extraParameters"],
+    extraParametersBehavior: item["extraParametersBehavior"],
+    responseFormat: !item["responseFormat"]
+      ? item["responseFormat"]
+      : chatCompletionResponseFormatSerializer(item["responseFormat"]),
+  };
+}
+
+export function chatCompletionSkillDeserializer(item: any): ChatCompletionSkill {
+  return {
+    odatatype: item["@odata.type"],
+    name: item["name"],
+    description: item["description"],
+    context: item["context"],
+    inputs: inputFieldMappingEntryArrayDeserializer(item["inputs"]),
+    outputs: outputFieldMappingEntryArrayDeserializer(item["outputs"]),
+    uri: item["uri"],
+    httpHeaders: !item["httpHeaders"]
+      ? item["httpHeaders"]
+      : webApiHttpHeadersDeserializer(item["httpHeaders"]),
+    httpMethod: item["httpMethod"],
+    timeout: item["timeout"],
+    batchSize: item["batchSize"],
+    degreeOfParallelism: item["degreeOfParallelism"],
+    authResourceId: item["authResourceId"],
+    authIdentity: !item["authIdentity"]
+      ? item["authIdentity"]
+      : searchIndexerDataIdentityUnionDeserializer(item["authIdentity"]),
+    apiKey: item["apiKey"],
+    commonModelParameters: !item["commonModelParameters"]
+      ? item["commonModelParameters"]
+      : commonModelParametersDeserializer(item["commonModelParameters"]),
+    extraParameters: item["extraParameters"],
+    extraParametersBehavior: item["extraParametersBehavior"],
+    responseFormat: !item["responseFormat"]
+      ?
item["responseFormat"] + : chatCompletionResponseFormatDeserializer(item["responseFormat"]), + odataType: item["@odata.type"], + }; +} + +/** A dictionary of http request headers. */ +export interface WebApiHttpHeaders { + /** Additional properties */ + additionalProperties?: Record; +} + +export function webApiHttpHeadersSerializer(item: WebApiHttpHeaders): any { + return { ...serializeRecord(item.additionalProperties ?? {}) }; +} + +export function webApiHttpHeadersDeserializer(item: any): WebApiHttpHeaders { + return { + additionalProperties: serializeRecord(item, []), + }; +} + +/** Common language model parameters for Chat Completions. If omitted, default values are used. */ +export interface CommonModelParameters { + /** The name of the model to use (e.g., 'gpt-4o', etc.). Default is null if not specified. */ + modelName?: string; + /** A float in the range [-2,2] that reduces or increases likelihood of repeated tokens. Default is 0. */ + frequencyPenalty?: number; + /** A float in the range [-2,2] that penalizes new tokens based on their existing presence. Default is 0. */ + presencePenalty?: number; + /** Maximum number of tokens to generate. */ + maxTokens?: number; + /** Sampling temperature. Default is 0.7. */ + temperature?: number; + /** Random seed for controlling deterministic outputs. If omitted, randomization is used. */ + seed?: number; + /** List of stop sequences that will cut off text generation. Default is none. */ + stop?: string[]; +} + +export function commonModelParametersSerializer(item: CommonModelParameters): any { + return { + model: item["modelName"], + frequencyPenalty: item["frequencyPenalty"], + presencePenalty: item["presencePenalty"], + maxTokens: item["maxTokens"], + temperature: item["temperature"], + seed: item["seed"], + stop: !item["stop"] + ? item["stop"] + : item["stop"].map((p: any) => { + return p; + }), + }; +} + +export function commonModelParametersDeserializer(item: any): CommonModelParameters { + return { + modelName: item["model"], + frequencyPenalty: item["frequencyPenalty"], + presencePenalty: item["presencePenalty"], + maxTokens: item["maxTokens"], + temperature: item["temperature"], + seed: item["seed"], + stop: !item["stop"] + ? item["stop"] + : item["stop"].map((p: any) => { + return p; + }), + }; +} + +/** Specifies how 'extraParameters' should be handled by Azure AI Foundry. Defaults to 'error'. */ +export enum KnownChatCompletionExtraParametersBehavior { + /** Passes any extra parameters directly to the model. */ + PassThrough = "passThrough", + /** Drops all extra parameters. */ + Drop = "drop", + /** Raises an error if any extra parameter is present. */ + Error = "error", +} + +/** + * Specifies how 'extraParameters' should be handled by Azure AI Foundry. Defaults to 'error'. \ + * {@link KnownChatCompletionExtraParametersBehavior} can be used interchangeably with ChatCompletionExtraParametersBehavior, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **passThrough**: Passes any extra parameters directly to the model. \ + * **drop**: Drops all extra parameters. \ + * **error**: Raises an error if any extra parameter is present. + */ +export type ChatCompletionExtraParametersBehavior = string; + +/** Determines how the language model's response should be serialized. Defaults to 'text'. */ +export interface ChatCompletionResponseFormat { + /** Specifies how the LLM should format the response. 
 */
+  type?: ChatCompletionResponseFormatType;
+  /** An open dictionary for extended properties. Required if 'type' == 'jsonSchema' */
+  jsonSchemaProperties?: ChatCompletionSchemaProperties;
+}
+
+export function chatCompletionResponseFormatSerializer(item: ChatCompletionResponseFormat): any {
+  return {
+    type: item["type"],
+    jsonSchemaProperties: !item["jsonSchemaProperties"]
+      ? item["jsonSchemaProperties"]
+      : chatCompletionSchemaPropertiesSerializer(item["jsonSchemaProperties"]),
+  };
+}
+
+export function chatCompletionResponseFormatDeserializer(item: any): ChatCompletionResponseFormat {
+  return {
+    type: item["type"],
+    jsonSchemaProperties: !item["jsonSchemaProperties"]
+      ? item["jsonSchemaProperties"]
+      : chatCompletionSchemaPropertiesDeserializer(item["jsonSchemaProperties"]),
+  };
+}
+
+/** Specifies how the LLM should format the response. */
+export enum KnownChatCompletionResponseFormatType {
+  /** Plain text response format. */
+  Text = "text",
+  /** Arbitrary JSON object response format. */
+  JsonObject = "jsonObject",
+  /** JSON schema-adhering response format. */
+  JsonSchema = "jsonSchema",
+}
+
+/**
+ * Specifies how the LLM should format the response. \
+ * {@link KnownChatCompletionResponseFormatType} can be used interchangeably with ChatCompletionResponseFormatType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **text**: Plain text response format. \
+ * **jsonObject**: Arbitrary JSON object response format. \
+ * **jsonSchema**: JSON schema-adhering response format.
+ */
+export type ChatCompletionResponseFormatType = string;
+
+/** Properties for JSON schema response format. */
+export interface ChatCompletionSchemaProperties {
+  /** Name of the json schema the model will adhere to. */
+  name?: string;
+  /** Description of the json schema the model will adhere to. */
+  description?: string;
+  /** Whether or not the model's response should use structured outputs. Default is true. */
+  strict?: boolean;
+  /** The schema definition. */
+  schema?: ChatCompletionSchema;
+}
+
+export function chatCompletionSchemaPropertiesSerializer(
+  item: ChatCompletionSchemaProperties,
+): any {
+  return {
+    name: item["name"],
+    description: item["description"],
+    strict: item["strict"],
+    schema: !item["schema"] ? item["schema"] : chatCompletionSchemaSerializer(item["schema"]),
+  };
+}
+
+export function chatCompletionSchemaPropertiesDeserializer(
+  item: any,
+): ChatCompletionSchemaProperties {
+  return {
+    name: item["name"],
+    description: item["description"],
+    strict: item["strict"],
+    schema: !item["schema"] ? item["schema"] : chatCompletionSchemaDeserializer(item["schema"]),
+  };
+}
+
+/** Object defining the custom schema the model will use to structure its output. */
+export interface ChatCompletionSchema {
+  /** Type of schema representation. Usually 'object'. Default is 'object'. */
+  type?: string;
+  /** A JSON-formatted string that defines the output schema's properties and constraints for the model. */
+  properties?: string;
+  /** An array of the property names that are required to be part of the model's response. */
+  required?: string[];
+  /** Controls whether it is allowable for an object to contain additional keys / values that were not defined in the JSON Schema. Default is false.
 */
+  additionalProperties?: boolean;
+}
+
+export function chatCompletionSchemaSerializer(item: ChatCompletionSchema): any {
+  return {
+    type: item["type"],
+    properties: item["properties"],
+    required: !item["required"]
+      ? item["required"]
+      : item["required"].map((p: any) => {
+          return p;
+        }),
+    additionalProperties: item["additionalProperties"],
+  };
+}
+
+export function chatCompletionSchemaDeserializer(item: any): ChatCompletionSchema {
+  return {
+    type: item["type"],
+    properties: item["properties"],
+    required: !item["required"]
+      ? item["required"]
+      : item["required"].map((p: any) => {
+          return p;
+        }),
+    additionalProperties: item["additionalProperties"],
+  };
+}
+
+/** Base type for describing any Azure AI service resource attached to a skillset. */
+export interface CognitiveServicesAccount {
+  /** The discriminator for derived types. */
+  /** The discriminator possible values: #Microsoft.Azure.Search.DefaultCognitiveServices, #Microsoft.Azure.Search.CognitiveServicesByKey, #Microsoft.Azure.Search.AIServicesByKey, #Microsoft.Azure.Search.AIServicesByIdentity */
+  odatatype: string;
+  /** Description of the Azure AI service resource attached to a skillset. */
+  description?: string;
+}
+
+export function cognitiveServicesAccountSerializer(item: CognitiveServicesAccount): any {
+  return { "@odata.type": item["odatatype"], description: item["description"] };
+}
+
+export function cognitiveServicesAccountDeserializer(item: any): CognitiveServicesAccount {
+  return {
+    odatatype: item["@odata.type"],
+    description: item["description"],
+  };
+}
+
+/** Alias for CognitiveServicesAccountUnion */
+export type CognitiveServicesAccountUnion =
+  | DefaultCognitiveServicesAccount
+  | CognitiveServicesAccountKey
+  | AIServicesAccountKey
+  | AIServicesAccountIdentity
+  | CognitiveServicesAccount;
+
+export function cognitiveServicesAccountUnionSerializer(item: CognitiveServicesAccountUnion): any {
+  switch (item.odatatype) {
+    case "#Microsoft.Azure.Search.DefaultCognitiveServices":
+      return defaultCognitiveServicesAccountSerializer(item as DefaultCognitiveServicesAccount);
+
+    case "#Microsoft.Azure.Search.CognitiveServicesByKey":
+      return cognitiveServicesAccountKeySerializer(item as CognitiveServicesAccountKey);
+
+    case "#Microsoft.Azure.Search.AIServicesByKey":
+      return aiServicesAccountKeySerializer(item as AIServicesAccountKey);
+
+    case "#Microsoft.Azure.Search.AIServicesByIdentity":
+      return aiServicesAccountIdentitySerializer(item as AIServicesAccountIdentity);
+
+    default:
+      return cognitiveServicesAccountSerializer(item);
+  }
+}
+
+export function cognitiveServicesAccountUnionDeserializer(
+  item: any,
+): CognitiveServicesAccountUnion {
+  switch (item["@odata.type"]) {
+    case "#Microsoft.Azure.Search.DefaultCognitiveServices":
+      return defaultCognitiveServicesAccountDeserializer(item as DefaultCognitiveServicesAccount);
+
+    case "#Microsoft.Azure.Search.CognitiveServicesByKey":
+      return cognitiveServicesAccountKeyDeserializer(item as CognitiveServicesAccountKey);
+
+    case "#Microsoft.Azure.Search.AIServicesByKey":
+      return aiServicesAccountKeyDeserializer(item as AIServicesAccountKey);
+
+    case "#Microsoft.Azure.Search.AIServicesByIdentity":
+      return aiServicesAccountIdentityDeserializer(item as AIServicesAccountIdentity);
+
+    default:
+      return cognitiveServicesAccountDeserializer(item);
+  }
+}
+
+/** An empty object that represents the default Azure AI service resource for a skillset.
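+ */
+
+// Union-dispatch sketch (editorial): the "@odata.type" discriminator on the raw
+// payload selects the concrete deserializer; an unrecognized value falls back to
+// the base CognitiveServicesAccount shape. The subdomain URL below is hypothetical.
+//
+//   const account = cognitiveServicesAccountUnionDeserializer({
+//     "@odata.type": "#Microsoft.Azure.Search.AIServicesByKey",
+//     key: "<api-key>",
+//     subdomainUrl: "https://my-ai-service.cognitiveservices.azure.com",
+//   }); // -> AIServicesAccountKey, with odatatype mapped from "@odata.type"
+
+/** An empty object that represents the default Azure AI service resource for a skillset.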
*/ +export interface DefaultCognitiveServicesAccount extends CognitiveServicesAccount { + /** A URI fragment specifying the type of Azure AI service resource attached to a skillset. */ + odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices"; +} + +export function defaultCognitiveServicesAccountSerializer( + item: DefaultCognitiveServicesAccount, +): any { + return { "@odata.type": item["odatatype"], description: item["description"] }; +} + +export function defaultCognitiveServicesAccountDeserializer( + item: any, +): DefaultCognitiveServicesAccount { + return { + odatatype: item["@odata.type"], + description: item["description"], + }; +} + +/** The multi-region account key of an Azure AI service resource that's attached to a skillset. */ +export interface CognitiveServicesAccountKey extends CognitiveServicesAccount { + /** The key used to provision the Azure AI service resource attached to a skillset. */ + key: string; + /** A URI fragment specifying the type of Azure AI service resource attached to a skillset. */ + odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey"; +} + +export function cognitiveServicesAccountKeySerializer(item: CognitiveServicesAccountKey): any { + return { + "@odata.type": item["odatatype"], + description: item["description"], + key: item["key"], + }; +} + +export function cognitiveServicesAccountKeyDeserializer(item: any): CognitiveServicesAccountKey { + return { + odatatype: item["@odata.type"], + description: item["description"], + key: item["key"], + }; +} + +/** The account key of an Azure AI service resource that's attached to a skillset, to be used with the resource's subdomain. */ +export interface AIServicesAccountKey extends CognitiveServicesAccount { + /** The key used to provision the Azure AI service resource attached to a skillset. */ + key: string; + /** The subdomain url for the corresponding AI Service. */ + subdomainUrl: string; + /** A URI fragment specifying the type of Azure AI service resource attached to a skillset. */ + odatatype: "#Microsoft.Azure.Search.AIServicesByKey"; +} + +export function aiServicesAccountKeySerializer(item: AIServicesAccountKey): any { + return { + "@odata.type": item["odatatype"], + description: item["description"], + key: item["key"], + subdomainUrl: item["subdomainUrl"], + }; +} + +export function aiServicesAccountKeyDeserializer(item: any): AIServicesAccountKey { + return { + odatatype: item["@odata.type"], + description: item["description"], + key: item["key"], + subdomainUrl: item["subdomainUrl"], + }; +} + +/** The multi-region account of an Azure AI service resource that's attached to a skillset. */ +export interface AIServicesAccountIdentity extends CognitiveServicesAccount { + /** The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + identity?: SearchIndexerDataIdentityUnion; + /** The subdomain url for the corresponding AI Service. */ + subdomainUrl: string; + /** A URI fragment specifying the type of Azure AI service resource attached to a skillset. */ + odatatype: "#Microsoft.Azure.Search.AIServicesByIdentity"; +} + +export function aiServicesAccountIdentitySerializer(item: AIServicesAccountIdentity): any { + return { + "@odata.type": item["odatatype"], + description: item["description"], + identity: !item["identity"] + ? 
item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + subdomainUrl: item["subdomainUrl"], + }; +} + +export function aiServicesAccountIdentityDeserializer(item: any): AIServicesAccountIdentity { + return { + odatatype: item["@odata.type"], + description: item["description"], + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + subdomainUrl: item["subdomainUrl"], + }; +} + +/** Definition of additional projections to azure blob, table, or files, of enriched data. */ +export interface SearchIndexerKnowledgeStore { + /** The connection string to the storage account projections will be stored in. */ + storageConnectionString: string; + /** A list of additional projections to perform during indexing. */ + projections: SearchIndexerKnowledgeStoreProjection[]; + /** The user-assigned managed identity used for connections to Azure Storage when writing knowledge store projections. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + identity?: SearchIndexerDataIdentityUnion; + /** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ + parameters?: SearchIndexerKnowledgeStoreParameters; +} + +export function searchIndexerKnowledgeStoreSerializer(item: SearchIndexerKnowledgeStore): any { + return { + storageConnectionString: item["storageConnectionString"], + projections: searchIndexerKnowledgeStoreProjectionArraySerializer(item["projections"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + parameters: !item["parameters"] + ? item["parameters"] + : searchIndexerKnowledgeStoreParametersSerializer(item["parameters"]), + }; +} + +export function searchIndexerKnowledgeStoreDeserializer(item: any): SearchIndexerKnowledgeStore { + return { + storageConnectionString: item["storageConnectionString"], + projections: searchIndexerKnowledgeStoreProjectionArrayDeserializer(item["projections"]), + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + parameters: !item["parameters"] + ? item["parameters"] + : searchIndexerKnowledgeStoreParametersDeserializer(item["parameters"]), + }; +} + +export function searchIndexerKnowledgeStoreProjectionArraySerializer( + result: Array, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreProjectionSerializer(item); + }); +} + +export function searchIndexerKnowledgeStoreProjectionArrayDeserializer( + result: Array, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreProjectionDeserializer(item); + }); +} + +/** Container object for various projection selectors. */ +export interface SearchIndexerKnowledgeStoreProjection { + /** Projections to Azure Table storage. */ + tables?: SearchIndexerKnowledgeStoreTableProjectionSelector[]; + /** Projections to Azure Blob storage. */ + objects?: SearchIndexerKnowledgeStoreObjectProjectionSelector[]; + /** Projections to Azure File storage. 
*/ + files?: SearchIndexerKnowledgeStoreFileProjectionSelector[]; +} + +export function searchIndexerKnowledgeStoreProjectionSerializer( + item: SearchIndexerKnowledgeStoreProjection, +): any { + return { + tables: !item["tables"] + ? item["tables"] + : searchIndexerKnowledgeStoreTableProjectionSelectorArraySerializer(item["tables"]), + objects: !item["objects"] + ? item["objects"] + : searchIndexerKnowledgeStoreObjectProjectionSelectorArraySerializer(item["objects"]), + files: !item["files"] + ? item["files"] + : searchIndexerKnowledgeStoreFileProjectionSelectorArraySerializer(item["files"]), + }; +} + +export function searchIndexerKnowledgeStoreProjectionDeserializer( + item: any, +): SearchIndexerKnowledgeStoreProjection { + return { + tables: !item["tables"] + ? item["tables"] + : searchIndexerKnowledgeStoreTableProjectionSelectorArrayDeserializer(item["tables"]), + objects: !item["objects"] + ? item["objects"] + : searchIndexerKnowledgeStoreObjectProjectionSelectorArrayDeserializer(item["objects"]), + files: !item["files"] + ? item["files"] + : searchIndexerKnowledgeStoreFileProjectionSelectorArrayDeserializer(item["files"]), + }; +} + +export function searchIndexerKnowledgeStoreTableProjectionSelectorArraySerializer( + result: Array<SearchIndexerKnowledgeStoreTableProjectionSelector>, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreTableProjectionSelectorSerializer(item); + }); +} + +export function searchIndexerKnowledgeStoreTableProjectionSelectorArrayDeserializer( + result: Array<SearchIndexerKnowledgeStoreTableProjectionSelector>, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreTableProjectionSelectorDeserializer(item); + }); +} + +/** Description for what data to store in Azure Tables. */ +export interface SearchIndexerKnowledgeStoreTableProjectionSelector + extends SearchIndexerKnowledgeStoreProjectionSelector { + /** Name of the Azure table to store projected data in. */ + tableName: string; +} + +export function searchIndexerKnowledgeStoreTableProjectionSelectorSerializer( + item: SearchIndexerKnowledgeStoreTableProjectionSelector, +): any { + return { + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArraySerializer(item["inputs"]), + tableName: item["tableName"], + }; +} + +export function searchIndexerKnowledgeStoreTableProjectionSelectorDeserializer( + item: any, +): SearchIndexerKnowledgeStoreTableProjectionSelector { + return { + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArrayDeserializer(item["inputs"]), + tableName: item["tableName"], + }; +} + +export function searchIndexerKnowledgeStoreObjectProjectionSelectorArraySerializer( + result: Array<SearchIndexerKnowledgeStoreObjectProjectionSelector>, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreObjectProjectionSelectorSerializer(item); + }); +} + +export function searchIndexerKnowledgeStoreObjectProjectionSelectorArrayDeserializer( + result: Array<SearchIndexerKnowledgeStoreObjectProjectionSelector>, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreObjectProjectionSelectorDeserializer(item); + }); +} + +/** Projection definition for what data to store in Azure Blob.
*/ +export interface SearchIndexerKnowledgeStoreObjectProjectionSelector + extends SearchIndexerKnowledgeStoreBlobProjectionSelector {} + +export function searchIndexerKnowledgeStoreObjectProjectionSelectorSerializer( + item: SearchIndexerKnowledgeStoreObjectProjectionSelector, +): any { + return { + storageContainer: item["storageContainer"], + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArraySerializer(item["inputs"]), + }; +} + +export function searchIndexerKnowledgeStoreObjectProjectionSelectorDeserializer( + item: any, +): SearchIndexerKnowledgeStoreObjectProjectionSelector { + return { + storageContainer: item["storageContainer"], + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArrayDeserializer(item["inputs"]), + }; +} + +export function searchIndexerKnowledgeStoreFileProjectionSelectorArraySerializer( + result: Array<SearchIndexerKnowledgeStoreFileProjectionSelector>, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreFileProjectionSelectorSerializer(item); + }); +} + +export function searchIndexerKnowledgeStoreFileProjectionSelectorArrayDeserializer( + result: Array<SearchIndexerKnowledgeStoreFileProjectionSelector>, +): any[] { + return result.map((item) => { + return searchIndexerKnowledgeStoreFileProjectionSelectorDeserializer(item); + }); +} + +/** Projection definition for what data to store in Azure Files. */ +export interface SearchIndexerKnowledgeStoreFileProjectionSelector + extends SearchIndexerKnowledgeStoreBlobProjectionSelector {} + +export function searchIndexerKnowledgeStoreFileProjectionSelectorSerializer( + item: SearchIndexerKnowledgeStoreFileProjectionSelector, +): any { + return { + storageContainer: item["storageContainer"], + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArraySerializer(item["inputs"]), + }; +} + +export function searchIndexerKnowledgeStoreFileProjectionSelectorDeserializer( + item: any, +): SearchIndexerKnowledgeStoreFileProjectionSelector { + return { + storageContainer: item["storageContainer"], + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArrayDeserializer(item["inputs"]), + }; +} + +/** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ +export interface SearchIndexerKnowledgeStoreParameters { + /** Whether or not projections should synthesize a generated key name if one isn't already present. */ + synthesizeGeneratedKeyName?: boolean; + /** Additional properties */ + additionalProperties?: Record<string, any>; +} + +export function searchIndexerKnowledgeStoreParametersSerializer( + item: SearchIndexerKnowledgeStoreParameters, +): any { + return { + ...serializeRecord(item.additionalProperties ??
{}), + synthesizeGeneratedKeyName: item["synthesizeGeneratedKeyName"], + }; +} + +export function searchIndexerKnowledgeStoreParametersDeserializer( + item: any, +): SearchIndexerKnowledgeStoreParameters { + return { + additionalProperties: serializeRecord(item, ["synthesizeGeneratedKeyName"]), + synthesizeGeneratedKeyName: item["synthesizeGeneratedKeyName"], + }; +} + +/** Definition of additional projections to secondary search indexes. */ +export interface SearchIndexerIndexProjection { + /** A list of projections to be performed to secondary search indexes. */ + selectors: SearchIndexerIndexProjectionSelector[]; + /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ + parameters?: SearchIndexerIndexProjectionsParameters; +} + +export function searchIndexerIndexProjectionSerializer(item: SearchIndexerIndexProjection): any { + return { + selectors: searchIndexerIndexProjectionSelectorArraySerializer(item["selectors"]), + parameters: !item["parameters"] + ? item["parameters"] + : searchIndexerIndexProjectionsParametersSerializer(item["parameters"]), + }; +} + +export function searchIndexerIndexProjectionDeserializer(item: any): SearchIndexerIndexProjection { + return { + selectors: searchIndexerIndexProjectionSelectorArrayDeserializer(item["selectors"]), + parameters: !item["parameters"] + ? item["parameters"] + : searchIndexerIndexProjectionsParametersDeserializer(item["parameters"]), + }; +} + +export function searchIndexerIndexProjectionSelectorArraySerializer( + result: Array<SearchIndexerIndexProjectionSelector>, +): any[] { + return result.map((item) => { + return searchIndexerIndexProjectionSelectorSerializer(item); + }); +} + +export function searchIndexerIndexProjectionSelectorArrayDeserializer( + result: Array<SearchIndexerIndexProjectionSelector>, +): any[] { + return result.map((item) => { + return searchIndexerIndexProjectionSelectorDeserializer(item); + }); +} + +/** Description for what data to store in the designated search index. */ +export interface SearchIndexerIndexProjectionSelector { + /** Name of the search index to project to. Must have a key field with the 'keyword' analyzer set. */ + targetIndexName: string; + /** Name of the field in the search index to map the parent document's key value to. Must be a string field that is filterable and not the key field. */ + parentKeyFieldName: string; + /** Source context for the projections. Represents the cardinality at which the document will be split into multiple sub documents. */ + sourceContext: string; + /** Mappings for the projection, or which source should be mapped to which field in the target index. */ + mappings: InputFieldMappingEntry[]; +} + +export function searchIndexerIndexProjectionSelectorSerializer( + item: SearchIndexerIndexProjectionSelector, +): any { + return { + targetIndexName: item["targetIndexName"], + parentKeyFieldName: item["parentKeyFieldName"], + sourceContext: item["sourceContext"], + mappings: inputFieldMappingEntryArraySerializer(item["mappings"]), + }; +} + +export function searchIndexerIndexProjectionSelectorDeserializer( + item: any, +): SearchIndexerIndexProjectionSelector { + return { + targetIndexName: item["targetIndexName"], + parentKeyFieldName: item["parentKeyFieldName"], + sourceContext: item["sourceContext"], + mappings: inputFieldMappingEntryArrayDeserializer(item["mappings"]), + }; +} + +/** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type.
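+ *
+ * A small sketch (illustrative): skip writing parent documents and only index
+ * the projected child documents:
+ * ```ts
+ * const parameters: SearchIndexerIndexProjectionsParameters = {
+ *   projectionMode: "skipIndexingParentDocuments",
+ * };
+ * ```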
*/ +export interface SearchIndexerIndexProjectionsParameters { + /** Defines behavior of the index projections in relation to the rest of the indexer. */ + projectionMode?: IndexProjectionMode; + /** Additional properties */ + additionalProperties?: Record<string, any>; +} + +export function searchIndexerIndexProjectionsParametersSerializer( + item: SearchIndexerIndexProjectionsParameters, +): any { + return { + ...serializeRecord(item.additionalProperties ?? {}), + projectionMode: item["projectionMode"], + }; +} + +export function searchIndexerIndexProjectionsParametersDeserializer( + item: any, +): SearchIndexerIndexProjectionsParameters { + return { + additionalProperties: serializeRecord(item, ["projectionMode"]), + projectionMode: item["projectionMode"], + }; +} + +/** Defines behavior of the index projections in relation to the rest of the indexer. */ +export enum KnownIndexProjectionMode { + /** The source document will be skipped from writing into the indexer's target index. */ + SkipIndexingParentDocuments = "skipIndexingParentDocuments", + /** The source document will be written into the indexer's target index. This is the default pattern. */ + IncludeIndexingParentDocuments = "includeIndexingParentDocuments", +} + +/** + * Defines behavior of the index projections in relation to the rest of the indexer. \ + * {@link KnownIndexProjectionMode} can be used interchangeably with IndexProjectionMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **skipIndexingParentDocuments**: The source document will be skipped from writing into the indexer's target index. \ + * **includeIndexingParentDocuments**: The source document will be written into the indexer's target index. This is the default pattern. + */ +export type IndexProjectionMode = string; + +/** Abstract class to share properties between concrete selectors. */ +export interface SearchIndexerKnowledgeStoreProjectionSelector { + /** Name of reference key to different projection. */ + referenceKeyName?: string; + /** Name of generated key to store projection under. */ + generatedKeyName?: string; + /** Source data to project. */ + source?: string; + /** Source context for complex projections. */ + sourceContext?: string; + /** Nested inputs for complex projections. */ + inputs?: InputFieldMappingEntry[]; +} + +export function searchIndexerKnowledgeStoreProjectionSelectorSerializer( + item: SearchIndexerKnowledgeStoreProjectionSelector, +): any { + return { + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArraySerializer(item["inputs"]), + }; +} + +export function searchIndexerKnowledgeStoreProjectionSelectorDeserializer( + item: any, +): SearchIndexerKnowledgeStoreProjectionSelector { + return { + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArrayDeserializer(item["inputs"]), + }; +} + +/** Abstract class to share properties between concrete selectors. */ +export interface SearchIndexerKnowledgeStoreBlobProjectionSelector + extends SearchIndexerKnowledgeStoreProjectionSelector { + /** Blob container to store projections in.
*/ + storageContainer: string; +} + +export function searchIndexerKnowledgeStoreBlobProjectionSelectorSerializer( + item: SearchIndexerKnowledgeStoreBlobProjectionSelector, +): any { + return { + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArraySerializer(item["inputs"]), + storageContainer: item["storageContainer"], + }; +} + +export function searchIndexerKnowledgeStoreBlobProjectionSelectorDeserializer( + item: any, +): SearchIndexerKnowledgeStoreBlobProjectionSelector { + return { + referenceKeyName: item["referenceKeyName"], + generatedKeyName: item["generatedKeyName"], + source: item["source"], + sourceContext: item["sourceContext"], + inputs: !item["inputs"] + ? item["inputs"] + : inputFieldMappingEntryArrayDeserializer(item["inputs"]), + storageContainer: item["storageContainer"], + }; +} + +/** Response from a list skillset request. If successful, it includes the full definitions of all skillsets. */ +export interface ListSkillsetsResult { + /** The skillsets defined in the Search service. */ + skillsets: SearchIndexerSkillset[]; +} + +export function listSkillsetsResultDeserializer(item: any): ListSkillsetsResult { + return { + skillsets: searchIndexerSkillsetArrayDeserializer(item["value"]), + }; +} + +export function searchIndexerSkillsetArraySerializer(result: Array<SearchIndexerSkillset>): any[] { + return result.map((item) => { + return searchIndexerSkillsetSerializer(item); + }); +} + +export function searchIndexerSkillsetArrayDeserializer( + result: Array<SearchIndexerSkillset>, +): any[] { + return result.map((item) => { + return searchIndexerSkillsetDeserializer(item); + }); +} + +/** The type of the skill names. */ +export interface SkillNames { + /** The names of skills to be reset. */ + skillNames?: string[]; +} + +export function skillNamesSerializer(item: SkillNames): any { + return { + skillNames: !item["skillNames"] + ? item["skillNames"] + : item["skillNames"].map((p: any) => { + return p; + }), + }; +} + +/** Request body for resync indexer operation. */ +export interface IndexerResyncBody { + /** Indexer to re-ingest pre-selected permissions data from data source to index. */ + options?: IndexerResyncOption[]; +} + +export function indexerResyncBodySerializer(item: IndexerResyncBody): any { + return { + options: !item["options"] + ? item["options"] + : item["options"].map((p: any) => { + return p; + }), + }; +} + +/** Options with various types of permission data to index. */ +export enum KnownIndexerResyncOption { + /** Indexer to re-ingest pre-selected permissions data from data source to index. */ + Permissions = "permissions", +} + +/** + * Options with various types of permission data to index. \ + * {@link KnownIndexerResyncOption} can be used interchangeably with IndexerResyncOption, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **permissions**: Indexer to re-ingest pre-selected permissions data from data source to index.
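+ *
+ * For example (illustrative), a resync request body that re-ingests permissions data:
+ * ```ts
+ * const body: IndexerResyncBody = { options: ["permissions"] };
+ * ```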
+ */ +export type IndexerResyncOption = string; diff --git a/sdk/search/search-documents/src/models/azure/search/documents/knowledgeBase/index.ts b/sdk/search/search-documents/src/models/azure/search/documents/knowledgeBase/index.ts new file mode 100644 index 000000000000..8e30a97720d9 --- /dev/null +++ b/sdk/search/search-documents/src/models/azure/search/documents/knowledgeBase/index.ts @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { + KnowledgeRetrievalReasoningEffort, + KnowledgeRetrievalReasoningEffortUnion, + KnownKnowledgeRetrievalReasoningEffortKind, + KnowledgeRetrievalReasoningEffortKind, + KnowledgeRetrievalMinimalReasoningEffort, + KnowledgeRetrievalLowReasoningEffort, + KnowledgeRetrievalMediumReasoningEffort, + KnowledgeRetrievalHighReasoningEffort, + KnownKnowledgeRetrievalOutputMode, + KnowledgeRetrievalOutputMode, + KnowledgeBaseRetrievalRequest, + KnowledgeBaseMessage, + KnowledgeBaseMessageContent, + KnowledgeBaseMessageContentUnion, + KnownKnowledgeBaseMessageContentType, + KnowledgeBaseMessageContentType, + KnowledgeBaseMessageTextContent, + KnowledgeBaseMessageImageContent, + KnowledgeBaseImageContent, + KnowledgeRetrievalIntent, + KnowledgeRetrievalIntentUnion, + KnownKnowledgeRetrievalIntentType, + KnowledgeRetrievalIntentType, + KnowledgeRetrievalSemanticIntent, + KnowledgeSourceParams, + KnowledgeSourceParamsUnion, + SearchIndexKnowledgeSourceParams, + AzureBlobKnowledgeSourceParams, + IndexedSharePointKnowledgeSourceParams, + IndexedOneLakeKnowledgeSourceParams, + WebKnowledgeSourceParams, + RemoteSharePointKnowledgeSourceParams, + KnowledgeBaseRetrievalResponse, + KnowledgeBaseActivityRecord, + KnowledgeBaseActivityRecordUnion, + KnowledgeBaseErrorDetail, + KnowledgeBaseErrorAdditionalInfo, + KnowledgeBaseModelQueryPlanningActivityRecord, + KnowledgeBaseModelAnswerSynthesisActivityRecord, + KnowledgeBaseAgenticReasoningActivityRecord, + KnowledgeBaseReference, + KnowledgeBaseReferenceUnion, + KnowledgeBaseSearchIndexReference, + KnowledgeBaseAzureBlobReference, + KnowledgeBaseIndexedSharePointReference, + KnowledgeBaseIndexedOneLakeReference, + KnowledgeBaseWebReference, + KnowledgeBaseRemoteSharePointReference, + SharePointSensitivityLabelInfo, +} from "./models.js"; diff --git a/sdk/search/search-documents/src/models/azure/search/documents/knowledgeBase/models.ts b/sdk/search/search-documents/src/models/azure/search/documents/knowledgeBase/models.ts new file mode 100644 index 000000000000..cf332775d037 --- /dev/null +++ b/sdk/search/search-documents/src/models/azure/search/documents/knowledgeBase/models.ts @@ -0,0 +1,1185 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { KnowledgeBaseActivityRecordType, KnowledgeBaseReferenceType } from "../../../../models.js"; +import { KnowledgeSourceKind } from "../indexes/models.js"; + +/** + * This file contains only generated model types and their (de)serializers. + * Disable the following rules for internal models with '_' prefix and deserializers which require 'any' for raw JSON input. + */ +/* eslint-disable @typescript-eslint/naming-convention */ +/* eslint-disable @typescript-eslint/explicit-module-boundary-types */ +/** Base type for reasoning effort. */ +export interface KnowledgeRetrievalReasoningEffort { + /** The kind of reasoning effort. 
*/ + /** The discriminator possible values: minimal, low, medium, high */ + kind: KnowledgeRetrievalReasoningEffortKind; +} + +export function knowledgeRetrievalReasoningEffortSerializer( + item: KnowledgeRetrievalReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** Alias for KnowledgeRetrievalReasoningEffortUnion */ +export type KnowledgeRetrievalReasoningEffortUnion = + | KnowledgeRetrievalMinimalReasoningEffort + | KnowledgeRetrievalLowReasoningEffort + | KnowledgeRetrievalMediumReasoningEffort + | KnowledgeRetrievalHighReasoningEffort + | KnowledgeRetrievalReasoningEffort; + +export function knowledgeRetrievalReasoningEffortUnionSerializer( + item: KnowledgeRetrievalReasoningEffortUnion, +): any { + switch (item.kind) { + case "minimal": + return knowledgeRetrievalMinimalReasoningEffortSerializer( + item as KnowledgeRetrievalMinimalReasoningEffort, + ); + + case "low": + return knowledgeRetrievalLowReasoningEffortSerializer( + item as KnowledgeRetrievalLowReasoningEffort, + ); + + case "medium": + return knowledgeRetrievalMediumReasoningEffortSerializer( + item as KnowledgeRetrievalMediumReasoningEffort, + ); + + case "high": + return knowledgeRetrievalHighReasoningEffortSerializer( + item as KnowledgeRetrievalHighReasoningEffort, + ); + + default: + return knowledgeRetrievalReasoningEffortSerializer(item); + } +} + +export function knowledgeRetrievalReasoningEffortUnionDeserializer( + item: any, +): KnowledgeRetrievalReasoningEffortUnion { + switch (item.kind) { + case "minimal": + return knowledgeRetrievalMinimalReasoningEffortDeserializer( + item as KnowledgeRetrievalMinimalReasoningEffort, + ); + + case "low": + return knowledgeRetrievalLowReasoningEffortDeserializer( + item as KnowledgeRetrievalLowReasoningEffort, + ); + + case "medium": + return knowledgeRetrievalMediumReasoningEffortDeserializer( + item as KnowledgeRetrievalMediumReasoningEffort, + ); + + case "high": + return knowledgeRetrievalHighReasoningEffortDeserializer( + item as KnowledgeRetrievalHighReasoningEffort, + ); + + default: + return knowledgeRetrievalReasoningEffortDeserializer(item); + } +} + +/** The amount of effort to use during retrieval. */ +export enum KnownKnowledgeRetrievalReasoningEffortKind { + /** Does not perform any source selections, any query planning, or any iterative search. */ + Minimal = "minimal", + /** Use low reasoning during retrieval. */ + Low = "low", + /** Use a moderate amount of reasoning during retrieval. */ + Medium = "medium", + /** Use a high amount of reasoning during retrieval. */ + High = "high", +} + +/** + * The amount of effort to use during retrieval. \ + * {@link KnownKnowledgeRetrievalReasoningEffortKind} can be used interchangeably with KnowledgeRetrievalReasoningEffortKind, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **minimal**: Does not perform any source selections, any query planning, or any iterative search. \ + * **low**: Use low reasoning during retrieval. \ + * **medium**: Use a moderate amount of reasoning during retrieval. \ + * **high**: Use a high amount of reasoning during retrieval. + */ +export type KnowledgeRetrievalReasoningEffortKind = string; + +/** Run knowledge retrieval with minimal reasoning effort. 
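+ *
+ * For example (an illustrative sketch):
+ * ```ts
+ * const effort: KnowledgeRetrievalReasoningEffortUnion = { kind: "minimal" };
+ * // knowledgeRetrievalReasoningEffortUnionSerializer(effort) routes to the
+ * // "minimal" case of the discriminated-union switch above.
+ * ```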
*/ +export interface KnowledgeRetrievalMinimalReasoningEffort + extends KnowledgeRetrievalReasoningEffort { + /** The discriminator value. */ + kind: "minimal"; +} + +export function knowledgeRetrievalMinimalReasoningEffortSerializer( + item: KnowledgeRetrievalMinimalReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalMinimalReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalMinimalReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** Run knowledge retrieval with low reasoning effort. */ +export interface KnowledgeRetrievalLowReasoningEffort extends KnowledgeRetrievalReasoningEffort { + /** The discriminator value. */ + kind: "low"; +} + +export function knowledgeRetrievalLowReasoningEffortSerializer( + item: KnowledgeRetrievalLowReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalLowReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalLowReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** Run knowledge retrieval with medium reasoning effort. */ +export interface KnowledgeRetrievalMediumReasoningEffort extends KnowledgeRetrievalReasoningEffort { + /** The discriminator value. */ + kind: "medium"; +} + +export function knowledgeRetrievalMediumReasoningEffortSerializer( + item: KnowledgeRetrievalMediumReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalMediumReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalMediumReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** Run knowledge retrieval with high reasoning effort. */ +export interface KnowledgeRetrievalHighReasoningEffort extends KnowledgeRetrievalReasoningEffort { + /** The discriminator value. */ + kind: "high"; +} + +export function knowledgeRetrievalHighReasoningEffortSerializer( + item: KnowledgeRetrievalHighReasoningEffort, +): any { + return { kind: item["kind"] }; +} + +export function knowledgeRetrievalHighReasoningEffortDeserializer( + item: any, +): KnowledgeRetrievalHighReasoningEffort { + return { + kind: item["kind"], + }; +} + +/** The output configuration for this retrieval. */ +export enum KnownKnowledgeRetrievalOutputMode { + /** Return data from the knowledge sources directly without generative alteration. */ + ExtractiveData = "extractiveData", + /** Synthesize an answer for the response payload. */ + AnswerSynthesis = "answerSynthesis", +} + +/** + * The output configuration for this retrieval. \ + * {@link KnownKnowledgeRetrievalOutputMode} can be used interchangeably with KnowledgeRetrievalOutputMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **extractiveData**: Return data from the knowledge sources directly without generative alteration. \ + * **answerSynthesis**: Synthesize an answer for the response payload. + */ +export type KnowledgeRetrievalOutputMode = string; + +/** The input contract for the retrieval request. */ +export interface KnowledgeBaseRetrievalRequest { + /** A list of chat message style input. */ + messages?: KnowledgeBaseMessage[]; + /** A list of intended queries to execute without model query planning. */ + intents?: KnowledgeRetrievalIntentUnion[]; + /** The maximum runtime in seconds. */ + maxRuntimeInSeconds?: number; + /** Limits the maximum size of the content in the output. */ + maxOutputSize?: number; + /** The retrieval reasoning effort configuration. 
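+ *
+ * For example (illustrative), a request might pair this with a message:
+ * ```ts
+ * const request: KnowledgeBaseRetrievalRequest = {
+ *   messages: [{ role: "user", content: [{ type: "text", text: "What hotels allow pets?" }] }],
+ *   retrievalReasoningEffort: { kind: "medium" },
+ *   includeActivity: true,
+ * };
+ * ```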
*/ + retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion; + /** Indicates retrieval results should include activity information. */ + includeActivity?: boolean; + /** The output configuration for this retrieval. */ + outputMode?: KnowledgeRetrievalOutputMode; + /** A list of runtime parameters for the knowledge sources. */ + knowledgeSourceParams?: KnowledgeSourceParamsUnion[]; +} + +export function knowledgeBaseRetrievalRequestSerializer(item: KnowledgeBaseRetrievalRequest): any { + return { + messages: !item["messages"] + ? item["messages"] + : knowledgeBaseMessageArraySerializer(item["messages"]), + intents: !item["intents"] + ? item["intents"] + : knowledgeRetrievalIntentUnionArraySerializer(item["intents"]), + maxRuntimeInSeconds: item["maxRuntimeInSeconds"], + maxOutputSize: item["maxOutputSize"], + retrievalReasoningEffort: !item["retrievalReasoningEffort"] + ? item["retrievalReasoningEffort"] + : knowledgeRetrievalReasoningEffortUnionSerializer(item["retrievalReasoningEffort"]), + includeActivity: item["includeActivity"], + outputMode: item["outputMode"], + knowledgeSourceParams: !item["knowledgeSourceParams"] + ? item["knowledgeSourceParams"] + : knowledgeSourceParamsUnionArraySerializer(item["knowledgeSourceParams"]), + }; +} + +export function knowledgeBaseMessageArraySerializer(result: Array<KnowledgeBaseMessage>): any[] { + return result.map((item) => { + return knowledgeBaseMessageSerializer(item); + }); +} + +export function knowledgeBaseMessageArrayDeserializer(result: Array<KnowledgeBaseMessage>): any[] { + return result.map((item) => { + return knowledgeBaseMessageDeserializer(item); + }); +} + +/** The natural language message style object. */ +export interface KnowledgeBaseMessage { + /** The role of the tool response. */ + role?: string; + /** The content of the message. */ + content: KnowledgeBaseMessageContentUnion[]; +} + +export function knowledgeBaseMessageSerializer(item: KnowledgeBaseMessage): any { + return { + role: item["role"], + content: knowledgeBaseMessageContentUnionArraySerializer(item["content"]), + }; +} + +export function knowledgeBaseMessageDeserializer(item: any): KnowledgeBaseMessage { + return { + role: item["role"], + content: knowledgeBaseMessageContentUnionArrayDeserializer(item["content"]), + }; +} + +export function knowledgeBaseMessageContentUnionArraySerializer( + result: Array<KnowledgeBaseMessageContentUnion>, +): any[] { + return result.map((item) => { + return knowledgeBaseMessageContentUnionSerializer(item); + }); +} + +export function knowledgeBaseMessageContentUnionArrayDeserializer( + result: Array<KnowledgeBaseMessageContentUnion>, +): any[] { + return result.map((item) => { + return knowledgeBaseMessageContentUnionDeserializer(item); + }); +} + +/** Specifies the type of the message content.
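+ *
+ * Content items are discriminated on `type`; for example (illustrative):
+ * ```ts
+ * const text: KnowledgeBaseMessageContentUnion = { type: "text", text: "hello" };
+ * const image: KnowledgeBaseMessageContentUnion = {
+ *   type: "image",
+ *   image: { url: "https://example.com/photo.png" },
+ * };
+ * ```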
*/ +export interface KnowledgeBaseMessageContent { + /** The type of the message */ + /** The discriminator possible values: text, image */ + type: KnowledgeBaseMessageContentType; +} + +export function knowledgeBaseMessageContentSerializer(item: KnowledgeBaseMessageContent): any { + return { type: item["type"] }; +} + +export function knowledgeBaseMessageContentDeserializer(item: any): KnowledgeBaseMessageContent { + return { + type: item["type"], + }; +} + +/** Alias for KnowledgeBaseMessageContentUnion */ +export type KnowledgeBaseMessageContentUnion = + | KnowledgeBaseMessageTextContent + | KnowledgeBaseMessageImageContent + | KnowledgeBaseMessageContent; + +export function knowledgeBaseMessageContentUnionSerializer( + item: KnowledgeBaseMessageContentUnion, +): any { + switch (item.type) { + case "text": + return knowledgeBaseMessageTextContentSerializer(item as KnowledgeBaseMessageTextContent); + + case "image": + return knowledgeBaseMessageImageContentSerializer(item as KnowledgeBaseMessageImageContent); + + default: + return knowledgeBaseMessageContentSerializer(item); + } +} + +export function knowledgeBaseMessageContentUnionDeserializer( + item: any, +): KnowledgeBaseMessageContentUnion { + switch (item.type) { + case "text": + return knowledgeBaseMessageTextContentDeserializer(item as KnowledgeBaseMessageTextContent); + + case "image": + return knowledgeBaseMessageImageContentDeserializer(item as KnowledgeBaseMessageImageContent); + + default: + return knowledgeBaseMessageContentDeserializer(item); + } +} + +/** The type of message content. */ +export enum KnownKnowledgeBaseMessageContentType { + /** Text message content kind. */ + Text = "text", + /** Image message content kind. */ + Image = "image", +} + +/** + * The type of message content. \ + * {@link KnownKnowledgeBaseMessageContentType} can be used interchangeably with KnowledgeBaseMessageContentType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **text**: Text message content kind. \ + * **image**: Image message content kind. + */ +export type KnowledgeBaseMessageContentType = string; + +/** Text message type. */ +export interface KnowledgeBaseMessageTextContent extends KnowledgeBaseMessageContent { + /** The discriminator value. */ + type: "text"; + /** The text content. */ + text: string; +} + +export function knowledgeBaseMessageTextContentSerializer( + item: KnowledgeBaseMessageTextContent, +): any { + return { type: item["type"], text: item["text"] }; +} + +export function knowledgeBaseMessageTextContentDeserializer( + item: any, +): KnowledgeBaseMessageTextContent { + return { + type: item["type"], + text: item["text"], + }; +} + +/** Image message type. */ +export interface KnowledgeBaseMessageImageContent extends KnowledgeBaseMessageContent { + /** The discriminator value. */ + type: "image"; + /** The image content. */ + image: KnowledgeBaseImageContent; +} + +export function knowledgeBaseMessageImageContentSerializer( + item: KnowledgeBaseMessageImageContent, +): any { + return { + type: item["type"], + image: knowledgeBaseImageContentSerializer(item["image"]), + }; +} + +export function knowledgeBaseMessageImageContentDeserializer( + item: any, +): KnowledgeBaseMessageImageContent { + return { + type: item["type"], + image: knowledgeBaseImageContentDeserializer(item["image"]), + }; +} + +/** Image content. */ +export interface KnowledgeBaseImageContent { + /** The url of the image. 
*/ + url: string; +} + +export function knowledgeBaseImageContentSerializer(item: KnowledgeBaseImageContent): any { + return { url: item["url"] }; +} + +export function knowledgeBaseImageContentDeserializer(item: any): KnowledgeBaseImageContent { + return { + url: item["url"], + }; +} + +export function knowledgeRetrievalIntentUnionArraySerializer( + result: Array<KnowledgeRetrievalIntentUnion>, +): any[] { + return result.map((item) => { + return knowledgeRetrievalIntentUnionSerializer(item); + }); +} + +/** An intended query to execute without model query planning. */ +export interface KnowledgeRetrievalIntent { + /** The type of the intent. */ + /** The discriminator possible values: semantic */ + type: KnowledgeRetrievalIntentType; +} + +export function knowledgeRetrievalIntentSerializer(item: KnowledgeRetrievalIntent): any { + return { type: item["type"] }; +} + +/** Alias for KnowledgeRetrievalIntentUnion */ +export type KnowledgeRetrievalIntentUnion = + | KnowledgeRetrievalSemanticIntent + | KnowledgeRetrievalIntent; + +export function knowledgeRetrievalIntentUnionSerializer(item: KnowledgeRetrievalIntentUnion): any { + switch (item.type) { + case "semantic": + return knowledgeRetrievalSemanticIntentSerializer(item as KnowledgeRetrievalSemanticIntent); + + default: + return knowledgeRetrievalIntentSerializer(item); + } +} + +/** The kind of knowledge base configuration to use. */ +export enum KnownKnowledgeRetrievalIntentType { + /** A natural language semantic query intent. */ + Semantic = "semantic", +} + +/** + * The kind of knowledge base configuration to use. \ + * {@link KnownKnowledgeRetrievalIntentType} can be used interchangeably with KnowledgeRetrievalIntentType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **semantic**: A natural language semantic query intent. + */ +export type KnowledgeRetrievalIntentType = string; + +/** A semantic query intent. */ +export interface KnowledgeRetrievalSemanticIntent extends KnowledgeRetrievalIntent { + /** The discriminator value. */ + type: "semantic"; + /** The semantic query to execute. */ + search: string; +} + +export function knowledgeRetrievalSemanticIntentSerializer( + item: KnowledgeRetrievalSemanticIntent, +): any { + return { type: item["type"], search: item["search"] }; +} + +export function knowledgeSourceParamsUnionArraySerializer( + result: Array<KnowledgeSourceParamsUnion>, +): any[] { + return result.map((item) => { + return knowledgeSourceParamsUnionSerializer(item); + }); +} + +/** Base type for knowledge source runtime parameters. */ +export interface KnowledgeSourceParams { + /** The name of the index the params apply to. */ + knowledgeSourceName: string; + /** Indicates whether references should be included for data retrieved from this source. */ + includeReferences?: boolean; + /** Indicates whether references should include the structured data obtained during retrieval in their payload. */ + includeReferenceSourceData?: boolean; + /** Indicates that this knowledge source should bypass source selection and always be queried at retrieval time. */ + alwaysQuerySource?: boolean; + /** The reranker threshold all retrieved documents must meet to be included in the response. */ + rerankerThreshold?: number; + /** The type of the knowledge source.
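+ *
+ * For example (illustrative), a search-index source scoped by an OData filter:
+ * ```ts
+ * const params: KnowledgeSourceParamsUnion = {
+ *   kind: "searchIndex",
+ *   knowledgeSourceName: "hotels-ks",
+ *   filterAddOn: "State eq 'VA'",
+ * };
+ * ```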
*/ + /** The discriminator possible values: searchIndex, azureBlob, indexedSharePoint, indexedOneLake, web, remoteSharePoint */ + kind: KnowledgeSourceKind; +} + +export function knowledgeSourceParamsSerializer(item: KnowledgeSourceParams): any { + return { + knowledgeSourceName: item["knowledgeSourceName"], + includeReferences: item["includeReferences"], + includeReferenceSourceData: item["includeReferenceSourceData"], + alwaysQuerySource: item["alwaysQuerySource"], + rerankerThreshold: item["rerankerThreshold"], + kind: item["kind"], + }; +} + +/** Alias for KnowledgeSourceParamsUnion */ +export type KnowledgeSourceParamsUnion = + | SearchIndexKnowledgeSourceParams + | AzureBlobKnowledgeSourceParams + | IndexedSharePointKnowledgeSourceParams + | IndexedOneLakeKnowledgeSourceParams + | WebKnowledgeSourceParams + | RemoteSharePointKnowledgeSourceParams + | KnowledgeSourceParams; + +export function knowledgeSourceParamsUnionSerializer(item: KnowledgeSourceParamsUnion): any { + switch (item.kind) { + case "searchIndex": + return searchIndexKnowledgeSourceParamsSerializer(item as SearchIndexKnowledgeSourceParams); + + case "azureBlob": + return azureBlobKnowledgeSourceParamsSerializer(item as AzureBlobKnowledgeSourceParams); + + case "indexedSharePoint": + return indexedSharePointKnowledgeSourceParamsSerializer( + item as IndexedSharePointKnowledgeSourceParams, + ); + + case "indexedOneLake": + return indexedOneLakeKnowledgeSourceParamsSerializer( + item as IndexedOneLakeKnowledgeSourceParams, + ); + + case "web": + return webKnowledgeSourceParamsSerializer(item as WebKnowledgeSourceParams); + + case "remoteSharePoint": + return remoteSharePointKnowledgeSourceParamsSerializer( + item as RemoteSharePointKnowledgeSourceParams, + ); + + default: + return knowledgeSourceParamsSerializer(item); + } +} + +/** Specifies runtime parameters for a search index knowledge source */ +export interface SearchIndexKnowledgeSourceParams extends KnowledgeSourceParams { + /** The discriminator value. */ + kind: "searchIndex"; + /** A filter condition applied to the index (e.g., 'State eq VA'). */ + filterAddOn?: string; +} + +export function searchIndexKnowledgeSourceParamsSerializer( + item: SearchIndexKnowledgeSourceParams, +): any { + return { + knowledgeSourceName: item["knowledgeSourceName"], + includeReferences: item["includeReferences"], + includeReferenceSourceData: item["includeReferenceSourceData"], + alwaysQuerySource: item["alwaysQuerySource"], + rerankerThreshold: item["rerankerThreshold"], + kind: item["kind"], + filterAddOn: item["filterAddOn"], + }; +} + +/** Specifies runtime parameters for an Azure Blob knowledge source */ +export interface AzureBlobKnowledgeSourceParams extends KnowledgeSourceParams { + /** The discriminator value. */ + kind: "azureBlob"; +} + +export function azureBlobKnowledgeSourceParamsSerializer( + item: AzureBlobKnowledgeSourceParams, +): any { + return { + knowledgeSourceName: item["knowledgeSourceName"], + includeReferences: item["includeReferences"], + includeReferenceSourceData: item["includeReferenceSourceData"], + alwaysQuerySource: item["alwaysQuerySource"], + rerankerThreshold: item["rerankerThreshold"], + kind: item["kind"], + }; +} + +/** Specifies runtime parameters for an indexed SharePoint knowledge source */ +export interface IndexedSharePointKnowledgeSourceParams extends KnowledgeSourceParams { + /** The discriminator value.
*/ + kind: "indexedSharePoint"; +} + +export function indexedSharePointKnowledgeSourceParamsSerializer( + item: IndexedSharePointKnowledgeSourceParams, +): any { + return { + knowledgeSourceName: item["knowledgeSourceName"], + includeReferences: item["includeReferences"], + includeReferenceSourceData: item["includeReferenceSourceData"], + alwaysQuerySource: item["alwaysQuerySource"], + rerankerThreshold: item["rerankerThreshold"], + kind: item["kind"], + }; +} + +/** Specifies runtime parameters for a indexed OneLake knowledge source */ +export interface IndexedOneLakeKnowledgeSourceParams extends KnowledgeSourceParams { + /** The discriminator value. */ + kind: "indexedOneLake"; +} + +export function indexedOneLakeKnowledgeSourceParamsSerializer( + item: IndexedOneLakeKnowledgeSourceParams, +): any { + return { + knowledgeSourceName: item["knowledgeSourceName"], + includeReferences: item["includeReferences"], + includeReferenceSourceData: item["includeReferenceSourceData"], + alwaysQuerySource: item["alwaysQuerySource"], + rerankerThreshold: item["rerankerThreshold"], + kind: item["kind"], + }; +} + +/** Specifies runtime parameters for a web knowledge source */ +export interface WebKnowledgeSourceParams extends KnowledgeSourceParams { + /** The discriminator value. */ + kind: "web"; + /** The language of the web results. */ + language?: string; + /** The market of the web results. */ + market?: string; + /** The number of web results to return. */ + count?: number; + /** The freshness of web results. */ + freshness?: string; +} + +export function webKnowledgeSourceParamsSerializer(item: WebKnowledgeSourceParams): any { + return { + knowledgeSourceName: item["knowledgeSourceName"], + includeReferences: item["includeReferences"], + includeReferenceSourceData: item["includeReferenceSourceData"], + alwaysQuerySource: item["alwaysQuerySource"], + rerankerThreshold: item["rerankerThreshold"], + kind: item["kind"], + language: item["language"], + market: item["market"], + count: item["count"], + freshness: item["freshness"], + }; +} + +/** Specifies runtime parameters for a remote SharePoint knowledge source */ +export interface RemoteSharePointKnowledgeSourceParams extends KnowledgeSourceParams { + /** The discriminator value. */ + kind: "remoteSharePoint"; + /** Keyword Query Language (KQL) expression with queryable SharePoint properties and attributes to scope the retrieval before the query runs. See documentation: https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference */ + filterExpression?: string; + /** A list of metadata fields to be returned for each item in the response. Only retrievable metadata properties can be included in this list. By default, no metadata is returned. Optional. */ + resourceMetadata?: string[]; + /** Container ID for SharePoint Embedded connection. When this is null, it will use SharePoint Online. */ + containerTypeId?: string; +} + +export function remoteSharePointKnowledgeSourceParamsSerializer( + item: RemoteSharePointKnowledgeSourceParams, +): any { + return { + knowledgeSourceName: item["knowledgeSourceName"], + includeReferences: item["includeReferences"], + includeReferenceSourceData: item["includeReferenceSourceData"], + alwaysQuerySource: item["alwaysQuerySource"], + rerankerThreshold: item["rerankerThreshold"], + kind: item["kind"], + filterExpression: item["filterExpression"], + resourceMetadata: !item["resourceMetadata"] + ? 
item["resourceMetadata"] + : item["resourceMetadata"].map((p: any) => { + return p; + }), + containerTypeId: item["containerTypeId"], + }; +} + +/** The output contract for the retrieval response. */ +export interface KnowledgeBaseRetrievalResponse { + /** The response messages. */ + response?: KnowledgeBaseMessage[]; + /** The activity records for tracking progress and billing implications. */ + activity?: KnowledgeBaseActivityRecordUnion[]; + /** The references for the retrieval data used in the response. */ + references?: KnowledgeBaseReferenceUnion[]; +} + +export function knowledgeBaseRetrievalResponseDeserializer( + item: any, +): KnowledgeBaseRetrievalResponse { + return { + response: !item["response"] + ? item["response"] + : knowledgeBaseMessageArrayDeserializer(item["response"]), + activity: !item["activity"] + ? item["activity"] + : knowledgeBaseActivityRecordUnionArrayDeserializer(item["activity"]), + references: !item["references"] + ? item["references"] + : knowledgeBaseReferenceUnionArrayDeserializer(item["references"]), + }; +} + +export function knowledgeBaseActivityRecordUnionArrayDeserializer( + result: Array, +): any[] { + return result.map((item) => { + return knowledgeBaseActivityRecordUnionDeserializer(item); + }); +} + +/** Base type for activity records. Tracks execution details, timing, and errors for knowledge base operations. */ +export interface KnowledgeBaseActivityRecord { + /** The ID of the activity record. */ + id: number; + /** The type of the activity record. */ + /** The discriminator possible values: modelQueryPlanning, modelAnswerSynthesis, agenticReasoning */ + type: KnowledgeBaseActivityRecordType; + /** The elapsed time in milliseconds for the retrieval activity. */ + elapsedMs?: number; + /** The error detail explaining why the operation failed. This property is only included when the activity does not succeed. */ + error?: KnowledgeBaseErrorDetail; +} + +export function knowledgeBaseActivityRecordDeserializer(item: any): KnowledgeBaseActivityRecord { + return { + id: item["id"], + type: item["type"], + elapsedMs: item["elapsedMs"], + error: !item["error"] ? item["error"] : knowledgeBaseErrorDetailDeserializer(item["error"]), + }; +} + +/** Alias for KnowledgeBaseActivityRecordUnion */ +export type KnowledgeBaseActivityRecordUnion = + | KnowledgeBaseModelQueryPlanningActivityRecord + | KnowledgeBaseModelAnswerSynthesisActivityRecord + | KnowledgeBaseAgenticReasoningActivityRecord + | KnowledgeBaseActivityRecord; + +export function knowledgeBaseActivityRecordUnionDeserializer( + item: any, +): KnowledgeBaseActivityRecordUnion { + switch (item.type) { + case "modelQueryPlanning": + return knowledgeBaseModelQueryPlanningActivityRecordDeserializer( + item as KnowledgeBaseModelQueryPlanningActivityRecord, + ); + + case "modelAnswerSynthesis": + return knowledgeBaseModelAnswerSynthesisActivityRecordDeserializer( + item as KnowledgeBaseModelAnswerSynthesisActivityRecord, + ); + + case "agenticReasoning": + return knowledgeBaseAgenticReasoningActivityRecordDeserializer( + item as KnowledgeBaseAgenticReasoningActivityRecord, + ); + + default: + return knowledgeBaseActivityRecordDeserializer(item); + } +} + +/** The error details. */ +export interface KnowledgeBaseErrorDetail { + /** The error code. */ + code?: string; + /** The error message. */ + message?: string; + /** The error target. */ + target?: string; + /** The error details. */ + details?: KnowledgeBaseErrorDetail[]; + /** The error additional info. 
*/ + additionalInfo?: KnowledgeBaseErrorAdditionalInfo[]; +} + +export function knowledgeBaseErrorDetailDeserializer(item: any): KnowledgeBaseErrorDetail { + return { + code: item["code"], + message: item["message"], + target: item["target"], + details: !item["details"] + ? item["details"] + : knowledgeBaseErrorDetailArrayDeserializer(item["details"]), + additionalInfo: !item["additionalInfo"] + ? item["additionalInfo"] + : knowledgeBaseErrorAdditionalInfoArrayDeserializer(item["additionalInfo"]), + }; +} + +export function knowledgeBaseErrorDetailArrayDeserializer( + result: Array<KnowledgeBaseErrorDetail>, +): any[] { + return result.map((item) => { + return knowledgeBaseErrorDetailDeserializer(item); + }); +} + +export function knowledgeBaseErrorAdditionalInfoArrayDeserializer( + result: Array<KnowledgeBaseErrorAdditionalInfo>, +): any[] { + return result.map((item) => { + return knowledgeBaseErrorAdditionalInfoDeserializer(item); + }); +} + +/** The resource management error additional info. */ +export interface KnowledgeBaseErrorAdditionalInfo { + /** The additional info type. */ + type?: string; + /** The additional info. */ + info?: Record<string, any>; +} + +export function knowledgeBaseErrorAdditionalInfoDeserializer( + item: any, +): KnowledgeBaseErrorAdditionalInfo { + return { + type: item["type"], + info: item["info"], + }; +} + +/** Represents an LLM query planning activity record. */ +export interface KnowledgeBaseModelQueryPlanningActivityRecord extends KnowledgeBaseActivityRecord { + /** The discriminator value. */ + type: "modelQueryPlanning"; + /** The number of input tokens for the LLM query planning activity. */ + inputTokens?: number; + /** The number of output tokens for the LLM query planning activity. */ + outputTokens?: number; +} + +export function knowledgeBaseModelQueryPlanningActivityRecordDeserializer( + item: any, +): KnowledgeBaseModelQueryPlanningActivityRecord { + return { + id: item["id"], + type: item["type"], + elapsedMs: item["elapsedMs"], + error: !item["error"] ? item["error"] : knowledgeBaseErrorDetailDeserializer(item["error"]), + inputTokens: item["inputTokens"], + outputTokens: item["outputTokens"], + }; +} + +/** Represents an LLM answer synthesis activity record. */ +export interface KnowledgeBaseModelAnswerSynthesisActivityRecord + extends KnowledgeBaseActivityRecord { + /** The discriminator value. */ + type: "modelAnswerSynthesis"; + /** The number of input tokens for the LLM answer synthesis activity. */ + inputTokens?: number; + /** The number of output tokens for the LLM answer synthesis activity. */ + outputTokens?: number; +} + +export function knowledgeBaseModelAnswerSynthesisActivityRecordDeserializer( + item: any, +): KnowledgeBaseModelAnswerSynthesisActivityRecord { + return { + id: item["id"], + type: item["type"], + elapsedMs: item["elapsedMs"], + error: !item["error"] ? item["error"] : knowledgeBaseErrorDetailDeserializer(item["error"]), + inputTokens: item["inputTokens"], + outputTokens: item["outputTokens"], + }; +} + +/** Represents an agentic reasoning activity record. */ +export interface KnowledgeBaseAgenticReasoningActivityRecord extends KnowledgeBaseActivityRecord { + /** The discriminator value. */ + type: "agenticReasoning"; + /** The number of input tokens for agentic reasoning. */ + reasoningTokens?: number; + /** The retrieval reasoning effort configuration.
+
+/** Represents an agentic reasoning activity record. */
+export interface KnowledgeBaseAgenticReasoningActivityRecord extends KnowledgeBaseActivityRecord {
+  /** The discriminator value. */
+  type: "agenticReasoning";
+  /** The number of input tokens for agentic reasoning. */
+  reasoningTokens?: number;
+  /** The retrieval reasoning effort configuration. */
+  retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion;
+}
+
+export function knowledgeBaseAgenticReasoningActivityRecordDeserializer(
+  item: any,
+): KnowledgeBaseAgenticReasoningActivityRecord {
+  return {
+    id: item["id"],
+    type: item["type"],
+    elapsedMs: item["elapsedMs"],
+    error: !item["error"] ? item["error"] : knowledgeBaseErrorDetailDeserializer(item["error"]),
+    reasoningTokens: item["reasoningTokens"],
+    retrievalReasoningEffort: !item["retrievalReasoningEffort"]
+      ? item["retrievalReasoningEffort"]
+      : knowledgeRetrievalReasoningEffortUnionDeserializer(item["retrievalReasoningEffort"]),
+  };
+}
+
+export function knowledgeBaseReferenceUnionArrayDeserializer(
+  result: Array<KnowledgeBaseReferenceUnion>,
+): any[] {
+  return result.map((item) => {
+    return knowledgeBaseReferenceUnionDeserializer(item);
+  });
+}
+
+/** Base type for references. */
+export interface KnowledgeBaseReference {
+  /** The type of the reference. */
+  /** The discriminator possible values: searchIndex, azureBlob, indexedSharePoint, indexedOneLake, web, remoteSharePoint */
+  type: KnowledgeBaseReferenceType;
+  /** The ID of the reference. */
+  id: string;
+  /** The source activity ID for the reference. */
+  activitySource: number;
+  /** The source data for the reference. */
+  sourceData?: Record<string, any>;
+  /** The reranker score for the document reference. */
+  rerankerScore?: number;
+}
+
+export function knowledgeBaseReferenceDeserializer(item: any): KnowledgeBaseReference {
+  return {
+    type: item["type"],
+    id: item["id"],
+    activitySource: item["activitySource"],
+    sourceData: item["sourceData"],
+    rerankerScore: item["rerankerScore"],
+  };
+}
+
+/** Alias for KnowledgeBaseReferenceUnion */
+export type KnowledgeBaseReferenceUnion =
+  | KnowledgeBaseSearchIndexReference
+  | KnowledgeBaseAzureBlobReference
+  | KnowledgeBaseIndexedSharePointReference
+  | KnowledgeBaseIndexedOneLakeReference
+  | KnowledgeBaseWebReference
+  | KnowledgeBaseRemoteSharePointReference
+  | KnowledgeBaseReference;
+
+export function knowledgeBaseReferenceUnionDeserializer(item: any): KnowledgeBaseReferenceUnion {
+  switch (item.type) {
+    case "searchIndex":
+      return knowledgeBaseSearchIndexReferenceDeserializer(
+        item as KnowledgeBaseSearchIndexReference,
+      );
+
+    case "azureBlob":
+      return knowledgeBaseAzureBlobReferenceDeserializer(item as KnowledgeBaseAzureBlobReference);
+
+    case "indexedSharePoint":
+      return knowledgeBaseIndexedSharePointReferenceDeserializer(
+        item as KnowledgeBaseIndexedSharePointReference,
+      );
+
+    case "indexedOneLake":
+      return knowledgeBaseIndexedOneLakeReferenceDeserializer(
+        item as KnowledgeBaseIndexedOneLakeReference,
+      );
+
+    case "web":
+      return knowledgeBaseWebReferenceDeserializer(item as KnowledgeBaseWebReference);
+
+    case "remoteSharePoint":
+      return knowledgeBaseRemoteSharePointReferenceDeserializer(
+        item as KnowledgeBaseRemoteSharePointReference,
+      );
+
+    default:
+      return knowledgeBaseReferenceDeserializer(item);
+  }
+}
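Each reference's `activitySource` points at the `id` of the activity record that produced it. A sketch joining the two collections from a retrieval response (types assumed exported):

```ts
import type {
  KnowledgeBaseActivityRecordUnion,
  KnowledgeBaseReferenceUnion,
} from "@azure/search-documents"; // assumed exports

// Group references under the activity record that produced them,
// using the activitySource -> activity id relationship.
function referencesByActivity(
  activity: KnowledgeBaseActivityRecordUnion[],
  references: KnowledgeBaseReferenceUnion[],
): Map<number, KnowledgeBaseReferenceUnion[]> {
  const byId = new Map<number, KnowledgeBaseReferenceUnion[]>(
    activity.map((a) => [a.id, []]),
  );
  for (const ref of references) {
    byId.get(ref.activitySource)?.push(ref);
  }
  return byId;
}
```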
+
+/** Represents an Azure Search document reference. */
+export interface KnowledgeBaseSearchIndexReference extends KnowledgeBaseReference {
+  /** The discriminator value. */
+  type: "searchIndex";
+  /** The document key for the reference. */
+  docKey?: string;
+}
+
+export function knowledgeBaseSearchIndexReferenceDeserializer(
+  item: any,
+): KnowledgeBaseSearchIndexReference {
+  return {
+    type: item["type"],
+    id: item["id"],
+    activitySource: item["activitySource"],
+    sourceData: item["sourceData"],
+    rerankerScore: item["rerankerScore"],
+    docKey: item["docKey"],
+  };
+}
+
+/** Represents an Azure Blob Storage document reference. */
+export interface KnowledgeBaseAzureBlobReference extends KnowledgeBaseReference {
+  /** The discriminator value. */
+  type: "azureBlob";
+  /** The blob URL for the reference. */
+  blobUrl?: string;
+}
+
+export function knowledgeBaseAzureBlobReferenceDeserializer(
+  item: any,
+): KnowledgeBaseAzureBlobReference {
+  return {
+    type: item["type"],
+    id: item["id"],
+    activitySource: item["activitySource"],
+    sourceData: item["sourceData"],
+    rerankerScore: item["rerankerScore"],
+    blobUrl: item["blobUrl"],
+  };
+}
+
+/** Represents an indexed SharePoint document reference. */
+export interface KnowledgeBaseIndexedSharePointReference extends KnowledgeBaseReference {
+  /** The discriminator value. */
+  type: "indexedSharePoint";
+  /** The document URL for the reference. */
+  docUrl?: string;
+}
+
+export function knowledgeBaseIndexedSharePointReferenceDeserializer(
+  item: any,
+): KnowledgeBaseIndexedSharePointReference {
+  return {
+    type: item["type"],
+    id: item["id"],
+    activitySource: item["activitySource"],
+    sourceData: item["sourceData"],
+    rerankerScore: item["rerankerScore"],
+    docUrl: item["docUrl"],
+  };
+}
+
+/** Represents an indexed OneLake document reference. */
+export interface KnowledgeBaseIndexedOneLakeReference extends KnowledgeBaseReference {
+  /** The discriminator value. */
+  type: "indexedOneLake";
+  /** The document URL for the reference. */
+  docUrl?: string;
+}
+
+export function knowledgeBaseIndexedOneLakeReferenceDeserializer(
+  item: any,
+): KnowledgeBaseIndexedOneLakeReference {
+  return {
+    type: item["type"],
+    id: item["id"],
+    activitySource: item["activitySource"],
+    sourceData: item["sourceData"],
+    rerankerScore: item["rerankerScore"],
+    docUrl: item["docUrl"],
+  };
+}
+
+/** Represents a web document reference. */
+export interface KnowledgeBaseWebReference extends KnowledgeBaseReference {
+  /** The discriminator value. */
+  type: "web";
+  /** The url the reference data originated from. */
+  url: string;
+  /** The title of the web document. */
+  title?: string;
+}
+
+export function knowledgeBaseWebReferenceDeserializer(item: any): KnowledgeBaseWebReference {
+  return {
+    type: item["type"],
+    id: item["id"],
+    activitySource: item["activitySource"],
+    sourceData: item["sourceData"],
+    rerankerScore: item["rerankerScore"],
+    url: item["url"],
+    title: item["title"],
+  };
+}
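Each reference subtype carries its location under a different property (`docKey`, `blobUrl`, `docUrl`, `url`, `webUrl`). A defensive sketch resolving a citation link per kind; the `in` checks are needed because the open union keeps a base member whose `type` is a plain string:

```ts
import type { KnowledgeBaseReferenceUnion } from "@azure/search-documents"; // assumed export

// Pick the most useful link for a citation, falling back per reference kind.
function referenceLink(ref: KnowledgeBaseReferenceUnion): string | undefined {
  switch (ref.type) {
    case "searchIndex":
      return "docKey" in ref ? ref.docKey : undefined;
    case "azureBlob":
      return "blobUrl" in ref ? ref.blobUrl : undefined;
    case "indexedSharePoint":
    case "indexedOneLake":
      return "docUrl" in ref ? ref.docUrl : undefined;
    case "web":
      return "url" in ref ? ref.url : undefined;
    case "remoteSharePoint":
      return "webUrl" in ref ? ref.webUrl : undefined;
    default:
      return undefined; // unknown discriminator: only base fields are safe
  }
}
```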
+
+/** Represents a remote SharePoint document reference. */
+export interface KnowledgeBaseRemoteSharePointReference extends KnowledgeBaseReference {
+  /** The discriminator value. */
+  type: "remoteSharePoint";
+  /** The url the reference data originated from. */
+  webUrl: string;
+  /** Information about the sensitivity label applied to the SharePoint document. */
+  searchSensitivityLabelInfo?: SharePointSensitivityLabelInfo;
+}
+
+export function knowledgeBaseRemoteSharePointReferenceDeserializer(
+  item: any,
+): KnowledgeBaseRemoteSharePointReference {
+  return {
+    type: item["type"],
+    id: item["id"],
+    activitySource: item["activitySource"],
+    sourceData: item["sourceData"],
+    rerankerScore: item["rerankerScore"],
+    webUrl: item["webUrl"],
+    searchSensitivityLabelInfo: !item["searchSensitivityLabelInfo"]
+      ? item["searchSensitivityLabelInfo"]
+      : sharePointSensitivityLabelInfoDeserializer(item["searchSensitivityLabelInfo"]),
+  };
+}
+
+/** Information about the sensitivity label applied to a SharePoint document. */
+export interface SharePointSensitivityLabelInfo {
+  /** The display name for the sensitivity label. */
+  displayName?: string;
+  /** The ID of the sensitivity label. */
+  sensitivityLabelId?: string;
+  /** The tooltip that should be displayed for the label in a UI. */
+  tooltip?: string;
+  /** The priority in which the sensitivity label is applied. */
+  priority?: number;
+  /** The color that the UI should display for the label, if configured. */
+  color?: string;
+  /** Indicates whether the sensitivity label enforces encryption. */
+  isEncrypted?: boolean;
+}
+
+export function sharePointSensitivityLabelInfoDeserializer(
+  item: any,
+): SharePointSensitivityLabelInfo {
+  return {
+    displayName: item["displayName"],
+    sensitivityLabelId: item["sensitivityLabelId"],
+    tooltip: item["tooltip"],
+    priority: item["priority"],
+    color: item["color"],
+    isEncrypted: item["isEncrypted"],
+  };
+}
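A UI that surfaces remote SharePoint results is expected to honor the sensitivity label. A small formatting sketch (type assumed exported):

```ts
import type { SharePointSensitivityLabelInfo } from "@azure/search-documents"; // assumed export

// Format a sensitivity label for display; encrypted content may need extra handling.
function formatLabel(label: SharePointSensitivityLabelInfo | undefined): string {
  if (!label?.displayName) return "";
  const encrypted = label.isEncrypted ? " (encrypted)" : "";
  return `[${label.displayName}${encrypted}]`;
}
```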
item["additionalInfo"] + : errorAdditionalInfoArrayDeserializer(item["additionalInfo"]), + }; +} + +export function errorDetailArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return errorDetailDeserializer(item); + }); +} + +export function errorAdditionalInfoArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return errorAdditionalInfoDeserializer(item); + }); +} + +/** The resource management error additional info. */ +export interface ErrorAdditionalInfo { + /** The additional info type. */ + type?: string; + /** The additional info. */ + info?: Record; +} + +export function errorAdditionalInfoDeserializer(item: any): ErrorAdditionalInfo { + return { + type: item["type"], + info: item["info"], + }; +} + +/** Response containing search results from an index. */ +export interface SearchDocumentsResult { + /** The total count of results found by the search operation, or null if the count was not requested. If present, the count may be greater than the number of results in this response. This can happen if you use the $top or $skip parameters, or if the query can't return all the requested documents in a single response. */ + readonly count?: number; + /** A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not specified in the request. */ + readonly coverage?: number; + /** The facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not include any facet expressions. */ + readonly facets?: Record; + /** The answers query results for the search operation; null if the answers query parameter was not specified or set to 'none'. */ + readonly answers?: QueryAnswerResult[]; + /** Debug information that applies to the search results as a whole. */ + readonly debugInfo?: DebugInfo; + /** Continuation JSON payload returned when the query can't return all the requested results in a single response. You can use this JSON along with @odata.nextLink to formulate another POST Search request to get the next part of the search response. */ + readonly nextPageParameters?: SearchRequest; + /** The sequence of results returned by the query. */ + readonly results: SearchResult[]; + /** Continuation URL returned when the query can't return all the requested results in a single response. You can use this URL to formulate another GET or POST Search request to get the next part of the search response. Make sure to use the same verb (GET or POST) as the request that produced this response. */ + readonly nextLink?: string; + /** Reason that a partial response was returned for a semantic ranking request. */ + readonly semanticPartialResponseReason?: SemanticErrorReason; + /** Type of partial response that was returned for a semantic ranking request. */ + readonly semanticPartialResponseType?: SemanticSearchResultsType; + /** Type of query rewrite that was used to retrieve documents. */ + readonly semanticQueryRewritesResultType?: SemanticQueryRewritesResultType; +} + +export function searchDocumentsResultSerializer(item: SearchDocumentsResult): any { + return item; +} + +export function searchDocumentsResultDeserializer(item: any): SearchDocumentsResult { + return { + count: item["@odata.count"], + coverage: item["@search.coverage"], + facets: !item["@search.facets"] + ? item["@search.facets"] + : facetResultArrayRecordDeserializer(item["@search.facets"]), + answers: !item["@search.answers"] + ? 
item["@search.answers"] + : queryAnswerResultArrayDeserializer(item["@search.answers"]), + debugInfo: !item["@search.debug"] + ? item["@search.debug"] + : debugInfoDeserializer(item["@search.debug"]), + nextPageParameters: !item["@search.nextPageParameters"] + ? item["@search.nextPageParameters"] + : searchRequestDeserializer(item["@search.nextPageParameters"]), + results: searchResultArrayDeserializer(item["value"]), + nextLink: item["@odata.nextLink"], + semanticPartialResponseReason: item["@search.semanticPartialResponseReason"], + semanticPartialResponseType: item["@search.semanticPartialResponseType"], + semanticQueryRewritesResultType: item["@search.semanticQueryRewritesResultType"], + }; +} + +export function facetResultArrayRecordSerializer( + item: Record>, +): Record { + const result: Record = {}; + Object.keys(item).map((key) => { + result[key] = !item[key] ? item[key] : facetResultArraySerializer(item[key]); + }); + return result; +} + +export function facetResultArrayRecordDeserializer( + item: Record, +): Record> { + const result: Record = {}; + Object.keys(item).map((key) => { + result[key] = !item[key] ? item[key] : facetResultArrayDeserializer(item[key]); + }); + return result; +} + +export function facetResultArraySerializer(result: Array): any[] { + return result.map((item) => { + return facetResultSerializer(item); + }); +} + +export function facetResultArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return facetResultDeserializer(item); + }); +} + +/** A single bucket of a facet query result. Reports the number of documents with a field value falling within a particular range or having a particular value or interval. */ +export interface FacetResult { + /** The approximate count of documents falling within the bucket described by this facet. */ + count?: number; + /** The nested facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not contain any nested facets. */ + readonly facets?: Record; + /** The resulting total sum for the facet when a sum metric is requested. */ + readonly sum?: number; + /** Additional properties */ + additionalProperties?: Record; +} + +export function facetResultSerializer(item: FacetResult): any { + return { + ...serializeRecord(item.additionalProperties ?? {}), + count: item["count"], + }; +} + +export function facetResultDeserializer(item: any): FacetResult { + return { + additionalProperties: serializeRecord(item, ["count", "facets", "sum"]), + count: item["count"], + facets: !item["@search.facets"] + ? item["@search.facets"] + : facetResultArrayRecordDeserializer(item["@search.facets"]), + sum: item["sum"], + }; +} + +export function queryAnswerResultArraySerializer(result: Array): any[] { + return result.map((item) => { + return queryAnswerResultSerializer(item); + }); +} + +export function queryAnswerResultArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return queryAnswerResultDeserializer(item); + }); +} + +/** An answer is a text passage extracted from the contents of the most relevant documents that matched the query. Answers are extracted from the top search results. Answer candidates are scored and the top answers are selected. */ +export interface QueryAnswerResult { + /** The score value represents how relevant the answer is to the query relative to other answers returned for the query. */ + score?: number; + /** The key of the document the answer was extracted from. 
+
+export function queryAnswerResultArraySerializer(result: Array<QueryAnswerResult>): any[] {
+  return result.map((item) => {
+    return queryAnswerResultSerializer(item);
+  });
+}
+
+export function queryAnswerResultArrayDeserializer(result: Array<QueryAnswerResult>): any[] {
+  return result.map((item) => {
+    return queryAnswerResultDeserializer(item);
+  });
+}
+
+/** An answer is a text passage extracted from the contents of the most relevant documents that matched the query. Answers are extracted from the top search results. Answer candidates are scored and the top answers are selected. */
+export interface QueryAnswerResult {
+  /** The score value represents how relevant the answer is to the query relative to other answers returned for the query. */
+  score?: number;
+  /** The key of the document the answer was extracted from. */
+  key?: string;
+  /** The text passage extracted from the document contents as the answer. */
+  text?: string;
+  /** Same text passage as in the Text property with highlighted text phrases most relevant to the query. */
+  highlights?: string;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function queryAnswerResultSerializer(item: QueryAnswerResult): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    score: item["score"],
+    key: item["key"],
+    text: item["text"],
+    highlights: item["highlights"],
+  };
+}
+
+export function queryAnswerResultDeserializer(item: any): QueryAnswerResult {
+  return {
+    additionalProperties: serializeRecord(item, ["score", "key", "text", "highlights"]),
+    score: item["score"],
+    key: item["key"],
+    text: item["text"],
+    highlights: item["highlights"],
+  };
+}
+
+/** Contains debugging information that can be used to further explore your search results. */
+export interface DebugInfo {
+  /** Contains debugging information specific to query rewrites. */
+  readonly queryRewrites?: QueryRewritesDebugInfo;
+}
+
+export function debugInfoSerializer(item: DebugInfo): any {
+  return item;
+}
+
+export function debugInfoDeserializer(item: any): DebugInfo {
+  return {
+    queryRewrites: !item["queryRewrites"]
+      ? item["queryRewrites"]
+      : queryRewritesDebugInfoDeserializer(item["queryRewrites"]),
+  };
+}
+
+/** Contains debugging information specific to query rewrites. */
+export interface QueryRewritesDebugInfo {
+  /** List of query rewrites generated for the text query. */
+  readonly text?: QueryRewritesValuesDebugInfo;
+  /** List of query rewrites generated for the vectorizable text queries. */
+  readonly vectors?: QueryRewritesValuesDebugInfo[];
+}
+
+export function queryRewritesDebugInfoDeserializer(item: any): QueryRewritesDebugInfo {
+  return {
+    text: !item["text"] ? item["text"] : queryRewritesValuesDebugInfoDeserializer(item["text"]),
+    vectors: !item["vectors"]
+      ? item["vectors"]
+      : queryRewritesValuesDebugInfoArrayDeserializer(item["vectors"]),
+  };
+}
+
+/** Contains debugging information specific to query rewrites. */
+export interface QueryRewritesValuesDebugInfo {
+  /** The input text to the generative query rewriting model. There may be cases where the user query and the input to the generative model are not identical. */
+  readonly inputQuery?: string;
+  /** List of query rewrites. */
+  readonly rewrites?: string[];
+}
+
+export function queryRewritesValuesDebugInfoDeserializer(item: any): QueryRewritesValuesDebugInfo {
+  return {
+    inputQuery: item["inputQuery"],
+    rewrites: !item["rewrites"]
+      ? item["rewrites"]
+      : item["rewrites"].map((p: any) => {
+          return p;
+        }),
+  };
+}
+
+export function queryRewritesValuesDebugInfoArrayDeserializer(
+  result: Array<QueryRewritesValuesDebugInfo>,
+): any[] {
+  return result.map((item) => {
+    return queryRewritesValuesDebugInfoDeserializer(item);
+  });
+}
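When a request enables query-rewrite debugging, the rewrites generated for the text query and each vector query come back under `debugInfo`. A sketch dumping them (type assumed exported):

```ts
import type { DebugInfo } from "@azure/search-documents"; // assumed export

// List the rewrites generated for the text query and each vector query.
function logRewrites(debug: DebugInfo | undefined): void {
  const text = debug?.queryRewrites?.text;
  if (text) console.log(`text "${text.inputQuery}":`, text.rewrites ?? []);
  for (const vector of debug?.queryRewrites?.vectors ?? []) {
    console.log(`vector "${vector.inputQuery}":`, vector.rewrites ?? []);
  }
}
```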
+
+/** Parameters for filtering, sorting, faceting, paging, and other search query behaviors. */
+export interface SearchRequest {
+  /** A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. */
+  includeTotalCount?: boolean;
+  /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. */
+  facets?: string[];
+  /** The OData $filter expression to apply to the search query. */
+  filter?: string;
+  /** The comma-separated list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */
+  highlightFields?: string;
+  /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is &lt;/em&gt;. */
+  highlightPostTag?: string;
+  /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is &lt;em&gt;. */
+  highlightPreTag?: string;
+  /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */
+  minimumCoverage?: number;
+  /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */
+  orderBy?: string;
+  /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */
+  queryType?: QueryType;
+  /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. */
+  scoringStatistics?: ScoringStatistics;
+  /** A value to be used to create a sticky session, which can help getting more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */
+  sessionId?: string;
+  /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). */
+  scoringParameters?: string[];
+  /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */
+  scoringProfile?: string;
+  /** Enables a debugging tool that can be used to further explore your reranked results. */
+  debug?: QueryDebugMode;
+  /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */
+  searchText?: string;
+  /** The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */
+  searchFields?: string;
+  /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. */
+  searchMode?: SearchMode;
+  /** A value that specifies the language of the search query. */
+  queryLanguage?: QueryLanguage;
+  /** A value that specified the type of the speller to use to spell-correct individual search query terms. */
+  querySpeller?: QuerySpellerType;
+  /** The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. */
+  select?: string;
+  /** The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead. */
+  skip?: number;
+  /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */
+  top?: number;
+  /** The name of a semantic configuration that will be used when processing documents for queries of type semantic. */
+  semanticConfigurationName?: string;
+  /** Allows the user to choose whether a semantic call should fail completely (default / current behavior), or to return partial results. */
+  semanticErrorHandling?: SemanticErrorMode;
+  /** Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. */
+  semanticMaxWaitInMilliseconds?: number;
+  /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */
+  semanticQuery?: string;
+  /** A value that specifies whether answers should be returned as part of the search response. */
+  answers?: QueryAnswerType;
+  /** A value that specifies whether captions should be returned as part of the search response. */
+  captions?: QueryCaptionType;
+  /** A value that specifies whether query rewrites should be generated to augment the search query. */
+  queryRewrites?: QueryRewritesType;
+  /** The comma-separated list of field names used for semantic ranking. */
+  semanticFields?: string;
+  /** The query parameters for vector and hybrid search queries. */
+  vectorQueries?: VectorQueryUnion[];
+  /** Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter' for new indexes. */
+  vectorFilterMode?: VectorFilterMode;
+  /** The query parameters to configure hybrid search behaviors. */
+  hybridSearch?: HybridSearch;
+}
item["scoringParameters"] + : item["scoringParameters"].map((p: any) => { + return p; + }), + scoringProfile: item["scoringProfile"], + debug: item["debug"], + searchText: item["search"], + searchFields: item["searchFields"], + searchMode: item["searchMode"], + queryLanguage: item["queryLanguage"], + querySpeller: item["speller"], + select: item["select"], + skip: item["skip"], + top: item["top"], + semanticConfigurationName: item["semanticConfiguration"], + semanticErrorHandling: item["semanticErrorHandling"], + semanticMaxWaitInMilliseconds: item["semanticMaxWaitInMilliseconds"], + semanticQuery: item["semanticQuery"], + answers: item["answers"], + captions: item["captions"], + queryRewrites: item["queryRewrites"], + semanticFields: item["semanticFields"], + vectorQueries: !item["vectorQueries"] + ? item["vectorQueries"] + : vectorQueryUnionArrayDeserializer(item["vectorQueries"]), + vectorFilterMode: item["vectorFilterMode"], + hybridSearch: !item["hybridSearch"] + ? item["hybridSearch"] + : hybridSearchDeserializer(item["hybridSearch"]), + }; +} + +/** Specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax and 'semantic' if query syntax is not needed. */ +export enum KnownQueryType { + /** Uses the simple query syntax for searches. Search text is interpreted using a simple query language that allows for symbols such as +, * and "". Queries are evaluated across all searchable fields by default, unless the searchFields parameter is specified. */ + Simple = "simple", + /** Uses the full Lucene query syntax for searches. Search text is interpreted using the Lucene query language which allows field-specific and weighted searches, as well as other advanced features. */ + Full = "full", + /** Best suited for queries expressed in natural language as opposed to keywords. Improves precision of search results by re-ranking the top search results using a ranking model trained on the Web corpus. */ + Semantic = "semantic", +} + +/** + * Specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax and 'semantic' if query syntax is not needed. \ + * {@link KnownQueryType} can be used interchangeably with QueryType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **simple**: Uses the simple query syntax for searches. Search text is interpreted using a simple query language that allows for symbols such as +, * and "". Queries are evaluated across all searchable fields by default, unless the searchFields parameter is specified. \ + * **full**: Uses the full Lucene query syntax for searches. Search text is interpreted using the Lucene query language which allows field-specific and weighted searches, as well as other advanced features. \ + * **semantic**: Best suited for queries expressed in natural language as opposed to keywords. Improves precision of search results by re-ranking the top search results using a ranking model trained on the Web corpus. + */ +export type QueryType = string; + +/** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. 
+
+/** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. */
+export enum KnownScoringStatistics {
+  /** The scoring statistics will be calculated locally for lower latency. */
+  Local = "local",
+  /** The scoring statistics will be calculated globally for more consistent scoring. */
+  Global = "global",
+}
+
+/**
+ * A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. \
+ * {@link KnownScoringStatistics} can be used interchangeably with ScoringStatistics,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **local**: The scoring statistics will be calculated locally for lower latency. \
+ * **global**: The scoring statistics will be calculated globally for more consistent scoring.
+ */
+export type ScoringStatistics = string;
+
+/** Enables a debugging tool that can be used to further explore your search results. You can enable multiple debug modes simultaneously by separating them with a | character, for example: semantic|queryRewrites. */
+export enum KnownQueryDebugMode {
+  /** No query debugging information will be returned. */
+  Disabled = "disabled",
+  /** Allows the user to further explore their reranked results. */
+  Semantic = "semantic",
+  /** Allows the user to further explore their hybrid and vector query results. */
+  Vector = "vector",
+  /** Allows the user to explore the list of query rewrites generated for their search request. */
+  QueryRewrites = "queryRewrites",
+  /** Allows the user to retrieve scoring information regarding vectors matched within a collection of complex types. */
+  InnerHits = "innerHits",
+  /** Turn on all debug options. */
+  All = "all",
+}
+
+/**
+ * Enables a debugging tool that can be used to further explore your search results. You can enable multiple debug modes simultaneously by separating them with a | character, for example: semantic|queryRewrites. \
+ * {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **disabled**: No query debugging information will be returned. \
+ * **semantic**: Allows the user to further explore their reranked results. \
+ * **vector**: Allows the user to further explore their hybrid and vector query results. \
+ * **queryRewrites**: Allows the user to explore the list of query rewrites generated for their search request. \
+ * **innerHits**: Allows the user to retrieve scoring information regarding vectors matched within a collection of complex types. \
+ * **all**: Turn on all debug options.
+ */
+export type QueryDebugMode = string;
+
+/** Specifies whether any or all of the search terms must be matched in order to count the document as a match. */
+export enum KnownSearchMode {
+  /** Any of the search terms must be matched in order to count the document as a match. */
+  Any = "any",
+  /** All of the search terms must be matched in order to count the document as a match. */
+  All = "all",
+}
+
+/**
+ * Specifies whether any or all of the search terms must be matched in order to count the document as a match. \
+ * {@link KnownSearchMode} can be used interchangeably with SearchMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **any**: Any of the search terms must be matched in order to count the document as a match. \
+ * **all**: All of the search terms must be matched in order to count the document as a match.
+ */
+export type SearchMode = string;
+
+/** The language of the query. */
+export enum KnownQueryLanguage {
+  /** Query language not specified. */
+  None = "none",
+  /** Query language value for English (United States). */
+  EnUs = "en-us",
+  /** Query language value for English (Great Britain). */
+  EnGb = "en-gb",
+  /** Query language value for English (India). */
+  EnIn = "en-in",
+  /** Query language value for English (Canada). */
+  EnCa = "en-ca",
+  /** Query language value for English (Australia). */
+  EnAu = "en-au",
+  /** Query language value for French (France). */
+  FrFr = "fr-fr",
+  /** Query language value for French (Canada). */
+  FrCa = "fr-ca",
+  /** Query language value for German (Germany). */
+  DeDe = "de-de",
+  /** Query language value for Spanish (Spain). */
+  EsEs = "es-es",
+  /** Query language value for Spanish (Mexico). */
+  EsMx = "es-mx",
+  /** Query language value for Chinese (China). */
+  ZhCn = "zh-cn",
+  /** Query language value for Chinese (Taiwan). */
+  ZhTw = "zh-tw",
+  /** Query language value for Portuguese (Brazil). */
+  PtBr = "pt-br",
+  /** Query language value for Portuguese (Portugal). */
+  PtPt = "pt-pt",
+  /** Query language value for Italian (Italy). */
+  ItIt = "it-it",
+  /** Query language value for Japanese (Japan). */
+  JaJp = "ja-jp",
+  /** Query language value for Korean (Korea). */
+  KoKr = "ko-kr",
+  /** Query language value for Russian (Russia). */
+  RuRu = "ru-ru",
+  /** Query language value for Czech (Czech Republic). */
+  CsCz = "cs-cz",
+  /** Query language value for Dutch (Belgium). */
+  NlBe = "nl-be",
+  /** Query language value for Dutch (Netherlands). */
+  NlNl = "nl-nl",
+  /** Query language value for Hungarian (Hungary). */
+  HuHu = "hu-hu",
+  /** Query language value for Polish (Poland). */
+  PlPl = "pl-pl",
+  /** Query language value for Swedish (Sweden). */
+  SvSe = "sv-se",
+  /** Query language value for Turkish (Turkey). */
+  TrTr = "tr-tr",
+  /** Query language value for Hindi (India). */
+  HiIn = "hi-in",
+  /** Query language value for Arabic (Saudi Arabia). */
+  ArSa = "ar-sa",
+  /** Query language value for Arabic (Egypt). */
+  ArEg = "ar-eg",
+  /** Query language value for Arabic (Morocco). */
+  ArMa = "ar-ma",
+  /** Query language value for Arabic (Kuwait). */
+  ArKw = "ar-kw",
+  /** Query language value for Arabic (Jordan). */
+  ArJo = "ar-jo",
+  /** Query language value for Danish (Denmark). */
+  DaDk = "da-dk",
+  /** Query language value for Norwegian (Norway). */
+  NoNo = "no-no",
+  /** Query language value for Bulgarian (Bulgaria). */
+  BgBg = "bg-bg",
+  /** Query language value for Croatian (Croatia). */
+  HrHr = "hr-hr",
+  /** Query language value for Croatian (Bosnia and Herzegovina). */
+  HrBa = "hr-ba",
+  /** Query language value for Malay (Malaysia). */
+  MsMy = "ms-my",
+  /** Query language value for Malay (Brunei Darussalam). */
+  MsBn = "ms-bn",
+  /** Query language value for Slovenian (Slovenia). */
+  SlSl = "sl-sl",
+  /** Query language value for Tamil (India). */
+  TaIn = "ta-in",
+  /** Query language value for Vietnamese (Viet Nam). */
+  ViVn = "vi-vn",
+  /** Query language value for Greek (Greece). */
+  ElGr = "el-gr",
+  /** Query language value for Romanian (Romania). */
+  RoRo = "ro-ro",
+  /** Query language value for Icelandic (Iceland). */
+  IsIs = "is-is",
+  /** Query language value for Indonesian (Indonesia). */
+  IdId = "id-id",
+  /** Query language value for Thai (Thailand). */
+  ThTh = "th-th",
+  /** Query language value for Lithuanian (Lithuania). */
+  LtLt = "lt-lt",
+  /** Query language value for Ukrainian (Ukraine). */
+  UkUa = "uk-ua",
+  /** Query language value for Latvian (Latvia). */
+  LvLv = "lv-lv",
+  /** Query language value for Estonian (Estonia). */
+  EtEe = "et-ee",
+  /** Query language value for Catalan. */
+  CaEs = "ca-es",
+  /** Query language value for Finnish (Finland). */
+  FiFi = "fi-fi",
+  /** Query language value for Serbian (Bosnia and Herzegovina). */
+  SrBa = "sr-ba",
+  /** Query language value for Serbian (Montenegro). */
+  SrMe = "sr-me",
+  /** Query language value for Serbian (Serbia). */
+  SrRs = "sr-rs",
+  /** Query language value for Slovak (Slovakia). */
+  SkSk = "sk-sk",
+  /** Query language value for Norwegian (Norway). */
+  NbNo = "nb-no",
+  /** Query language value for Armenian (Armenia). */
+  HyAm = "hy-am",
+  /** Query language value for Bengali (India). */
+  BnIn = "bn-in",
+  /** Query language value for Basque. */
+  EuEs = "eu-es",
+  /** Query language value for Galician. */
+  GlEs = "gl-es",
+  /** Query language value for Gujarati (India). */
+  GuIn = "gu-in",
+  /** Query language value for Hebrew (Israel). */
+  HeIl = "he-il",
+  /** Query language value for Irish (Ireland). */
+  GaIe = "ga-ie",
+  /** Query language value for Kannada (India). */
+  KnIn = "kn-in",
+  /** Query language value for Malayalam (India). */
+  MlIn = "ml-in",
+  /** Query language value for Marathi (India). */
+  MrIn = "mr-in",
+  /** Query language value for Persian (U.A.E.). */
+  FaAe = "fa-ae",
+  /** Query language value for Punjabi (India). */
+  PaIn = "pa-in",
+  /** Query language value for Telugu (India). */
+  TeIn = "te-in",
+  /** Query language value for Urdu (Pakistan). */
+  UrPk = "ur-pk",
+}
+
+/**
+ * The language of the query. \
+ * {@link KnownQueryLanguage} can be used interchangeably with QueryLanguage,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Query language not specified. \
+ * **en-us**: Query language value for English (United States). \
+ * **en-gb**: Query language value for English (Great Britain). \
+ * **en-in**: Query language value for English (India). \
+ * **en-ca**: Query language value for English (Canada). \
+ * **en-au**: Query language value for English (Australia). \
+ * **fr-fr**: Query language value for French (France). \
+ * **fr-ca**: Query language value for French (Canada). \
+ * **de-de**: Query language value for German (Germany). \
+ * **es-es**: Query language value for Spanish (Spain). \
+ * **es-mx**: Query language value for Spanish (Mexico). \
+ * **zh-cn**: Query language value for Chinese (China). \
+ * **zh-tw**: Query language value for Chinese (Taiwan). \
+ * **pt-br**: Query language value for Portuguese (Brazil). \
+ * **pt-pt**: Query language value for Portuguese (Portugal). \
+ * **it-it**: Query language value for Italian (Italy). \
+ * **ja-jp**: Query language value for Japanese (Japan). \
+ * **ko-kr**: Query language value for Korean (Korea). \
+ * **ru-ru**: Query language value for Russian (Russia). \
+ * **cs-cz**: Query language value for Czech (Czech Republic). \
+ * **nl-be**: Query language value for Dutch (Belgium). \
+ * **nl-nl**: Query language value for Dutch (Netherlands). \
+ * **hu-hu**: Query language value for Hungarian (Hungary). \
+ * **pl-pl**: Query language value for Polish (Poland). \
+ * **sv-se**: Query language value for Swedish (Sweden). \
+ * **tr-tr**: Query language value for Turkish (Turkey). \
+ * **hi-in**: Query language value for Hindi (India). \
+ * **ar-sa**: Query language value for Arabic (Saudi Arabia). \
+ * **ar-eg**: Query language value for Arabic (Egypt). \
+ * **ar-ma**: Query language value for Arabic (Morocco). \
+ * **ar-kw**: Query language value for Arabic (Kuwait). \
+ * **ar-jo**: Query language value for Arabic (Jordan). \
+ * **da-dk**: Query language value for Danish (Denmark). \
+ * **no-no**: Query language value for Norwegian (Norway). \
+ * **bg-bg**: Query language value for Bulgarian (Bulgaria). \
+ * **hr-hr**: Query language value for Croatian (Croatia). \
+ * **hr-ba**: Query language value for Croatian (Bosnia and Herzegovina). \
+ * **ms-my**: Query language value for Malay (Malaysia). \
+ * **ms-bn**: Query language value for Malay (Brunei Darussalam). \
+ * **sl-sl**: Query language value for Slovenian (Slovenia). \
+ * **ta-in**: Query language value for Tamil (India). \
+ * **vi-vn**: Query language value for Vietnamese (Viet Nam). \
+ * **el-gr**: Query language value for Greek (Greece). \
+ * **ro-ro**: Query language value for Romanian (Romania). \
+ * **is-is**: Query language value for Icelandic (Iceland). \
+ * **id-id**: Query language value for Indonesian (Indonesia). \
+ * **th-th**: Query language value for Thai (Thailand). \
+ * **lt-lt**: Query language value for Lithuanian (Lithuania). \
+ * **uk-ua**: Query language value for Ukrainian (Ukraine). \
+ * **lv-lv**: Query language value for Latvian (Latvia). \
+ * **et-ee**: Query language value for Estonian (Estonia). \
+ * **ca-es**: Query language value for Catalan. \
+ * **fi-fi**: Query language value for Finnish (Finland). \
+ * **sr-ba**: Query language value for Serbian (Bosnia and Herzegovina). \
+ * **sr-me**: Query language value for Serbian (Montenegro). \
+ * **sr-rs**: Query language value for Serbian (Serbia). \
+ * **sk-sk**: Query language value for Slovak (Slovakia). \
+ * **nb-no**: Query language value for Norwegian (Norway). \
+ * **hy-am**: Query language value for Armenian (Armenia). \
+ * **bn-in**: Query language value for Bengali (India). \
+ * **eu-es**: Query language value for Basque. \
+ * **gl-es**: Query language value for Galician. \
+ * **gu-in**: Query language value for Gujarati (India). \
+ * **he-il**: Query language value for Hebrew (Israel). \
+ * **ga-ie**: Query language value for Irish (Ireland). \
+ * **kn-in**: Query language value for Kannada (India). \
+ * **ml-in**: Query language value for Malayalam (India). \
+ * **mr-in**: Query language value for Marathi (India). \
+ * **fa-ae**: Query language value for Persian (U.A.E.). \
+ * **pa-in**: Query language value for Punjabi (India). \
+ * **te-in**: Query language value for Telugu (India). \
+ * **ur-pk**: Query language value for Urdu (Pakistan).
+ */
+export type QueryLanguage = string;
+
+/** Improve search recall by spell-correcting individual search query terms. */
+export enum KnownQuerySpellerType {
+  /** Speller not enabled. */
+  None = "none",
+  /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */
+  Lexicon = "lexicon",
+}
+
+/**
+ * Improve search recall by spell-correcting individual search query terms. \
+ * {@link KnownQuerySpellerType} can be used interchangeably with QuerySpellerType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Speller not enabled. \
+ * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
+ */
+export type QuerySpellerType = string;
+
+/** Allows the user to choose whether a semantic call should fail completely, or to return partial results. */
+export enum KnownSemanticErrorMode {
+  /** If the semantic processing fails, partial results still return. The definition of partial results depends on what semantic step failed and what was the reason for failure. */
+  Partial = "partial",
+  /** If there is an exception during the semantic processing step, the query will fail and return the appropriate HTTP code depending on the error. */
+  Fail = "fail",
+}
+
+/**
+ * Allows the user to choose whether a semantic call should fail completely, or to return partial results. \
+ * {@link KnownSemanticErrorMode} can be used interchangeably with SemanticErrorMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **partial**: If the semantic processing fails, partial results still return. The definition of partial results depends on what semantic step failed and what was the reason for failure. \
+ * **fail**: If there is an exception during the semantic processing step, the query will fail and return the appropriate HTTP code depending on the error.
+ */
+export type SemanticErrorMode = string;
+
+/** This parameter is only valid if the query type is `semantic`. If set, the query returns answers extracted from key passages in the highest ranked documents. The number of answers returned can be configured by appending the pipe character `|` followed by the `count-` option after the answers parameter value, such as `extractive|count-3`. Default count is 1. The confidence threshold can be configured by appending the pipe character `|` followed by the `threshold-` option after the answers parameter value, such as `extractive|threshold-0.9`. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. */
+export enum KnownQueryAnswerType {
+  /** Do not return answers for the query. */
+  None = "none",
+  /** Extracts answer candidates from the contents of the documents returned in response to a query expressed as a question in natural language. */
+  Extractive = "extractive",
+}
+
+/**
+ * This parameter is only valid if the query type is `semantic`. If set, the query returns answers extracted from key passages in the highest ranked documents. The number of answers returned can be configured by appending the pipe character `|` followed by the `count-` option after the answers parameter value, such as `extractive|count-3`. Default count is 1. The confidence threshold can be configured by appending the pipe character `|` followed by the `threshold-` option after the answers parameter value, such as `extractive|threshold-0.9`. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. \
+ * {@link KnownQueryAnswerType} can be used interchangeably with QueryAnswerType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Do not return answers for the query. \
+ * **extractive**: Extracts answer candidates from the contents of the documents returned in response to a query expressed as a question in natural language.
+ */
+export type QueryAnswerType = string;
+
+/** This parameter is only valid if the query type is `semantic`. If set, the query returns captions extracted from key passages in the highest ranked documents. When Captions is set to `extractive`, highlighting is enabled by default, and can be configured by appending the pipe character `|` followed by the `highlight-` option, such as `extractive|highlight-true`. Defaults to `None`. The maximum character length of captions can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. */
+export enum KnownQueryCaptionType {
+  /** Do not return captions for the query. */
+  None = "none",
+  /** Extracts captions from the matching documents that contain passages relevant to the search query. */
+  Extractive = "extractive",
+}
+
+/**
+ * This parameter is only valid if the query type is `semantic`. If set, the query returns captions extracted from key passages in the highest ranked documents. When Captions is set to `extractive`, highlighting is enabled by default, and can be configured by appending the pipe character `|` followed by the `highlight-` option, such as `extractive|highlight-true`. Defaults to `None`. The maximum character length of captions can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. \
+ * {@link KnownQueryCaptionType} can be used interchangeably with QueryCaptionType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Do not return captions for the query. \
+ * **extractive**: Extracts captions from the matching documents that contain passages relevant to the search query.
+ */
+export type QueryCaptionType = string;
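The `answers` and `captions` parameters encode their options as pipe-delimited strings, per the doc comments above. A small builder sketch for that format (the helper name is illustrative):

```ts
// Build e.g. "extractive|count-3|threshold-0.9|maxcharlength-600" following
// the option-string format documented for the answers/captions parameters.
function extractiveAnswers(opts: {
  count?: number;
  threshold?: number;
  maxCharLength?: number;
}): string {
  const parts = ["extractive"];
  if (opts.count !== undefined) parts.push(`count-${opts.count}`);
  if (opts.threshold !== undefined) parts.push(`threshold-${opts.threshold}`);
  if (opts.maxCharLength !== undefined) parts.push(`maxcharlength-${opts.maxCharLength}`);
  return parts.join("|");
}
```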
+
+/** This parameter is only valid if the query type is `semantic`. When QueryRewrites is set to `generative`, the query terms are sent to a generate model which will produce 10 (default) rewrites to help increase the recall of the request. The requested count can be configured by appending the pipe character `|` followed by the `count-` option, such as `generative|count-3`. Defaults to `None`. */
+export enum KnownQueryRewritesType {
+  /** Do not generate additional query rewrites for this query. */
+  None = "none",
+  /** Generate alternative query terms to increase the recall of a search request. */
+  Generative = "generative",
+}
+
+/**
+ * This parameter is only valid if the query type is `semantic`. When QueryRewrites is set to `generative`, the query terms are sent to a generate model which will produce 10 (default) rewrites to help increase the recall of the request. The requested count can be configured by appending the pipe character `|` followed by the `count-` option, such as `generative|count-3`. Defaults to `None`. \
+ * {@link KnownQueryRewritesType} can be used interchangeably with QueryRewritesType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Do not generate additional query rewrites for this query. \
+ * **generative**: Generate alternative query terms to increase the recall of a search request.
+ */
+export type QueryRewritesType = string;
+
+export function vectorQueryUnionArraySerializer(result: Array<VectorQueryUnion>): any[] {
+  return result.map((item) => {
+    return vectorQueryUnionSerializer(item);
+  });
+}
+
+export function vectorQueryUnionArrayDeserializer(result: Array<VectorQueryUnion>): any[] {
+  return result.map((item) => {
+    return vectorQueryUnionDeserializer(item);
+  });
+}
+
+/** The query parameters for vector and hybrid search queries. */
+export interface VectorQuery {
+  /** Number of nearest neighbors to return as top hits. */
+  kNearestNeighbors?: number;
+  /** Vector Fields of type Collection(Edm.Single) to be included in the vector searched. */
+  fields?: string;
+  /** When true, triggers an exhaustive k-nearest neighbor search across all vectors within the vector index. Useful for scenarios where exact matches are critical, such as determining ground truth values. */
+  exhaustive?: boolean;
+  /** Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter configured in the index definition. It can be set only when 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method is used on the underlying vector field. */
+  oversampling?: number;
+  /** Relative weight of the vector query when compared to other vector query and/or the text query within the same search request. This value is used when combining the results of multiple ranking lists produced by the different vector queries and/or the results retrieved through the text query. The higher the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. */
+  weight?: number;
+  /** The threshold used for vector queries. Note this can only be set if all 'fields' use the same similarity metric. */
+  threshold?: VectorThresholdUnion;
+  /** The OData filter expression to apply to this specific vector query. If no filter expression is defined at the vector level, the expression defined in the top level filter parameter is used instead. */
+  filterOverride?: string;
+  /** Limits the number of vectors returned per document for this vector query. */
+  perDocumentVectorLimit?: number;
+  /** Type of query. */
+  /** The discriminator possible values: vector, text, imageUrl, imageBinary */
+  kind: VectorQueryKind;
+}
+
+export function vectorQuerySerializer(item: VectorQuery): any {
+  return {
+    k: item["kNearestNeighbors"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionSerializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+  };
+}
item["threshold"] + : vectorThresholdUnionDeserializer(item["threshold"]), + filterOverride: item["filterOverride"], + perDocumentVectorLimit: item["perDocumentVectorLimit"], + kind: item["kind"], + }; +} + +/** Alias for VectorQueryUnion */ +export type VectorQueryUnion = + | VectorizedQuery + | VectorizableTextQuery + | VectorizableImageUrlQuery + | VectorizableImageBinaryQuery + | VectorQuery; + +export function vectorQueryUnionSerializer(item: VectorQueryUnion): any { + switch (item.kind) { + case "vector": + return vectorizedQuerySerializer(item as VectorizedQuery); + + case "text": + return vectorizableTextQuerySerializer(item as VectorizableTextQuery); + + case "imageUrl": + return vectorizableImageUrlQuerySerializer(item as VectorizableImageUrlQuery); + + case "imageBinary": + return vectorizableImageBinaryQuerySerializer(item as VectorizableImageBinaryQuery); + + default: + return vectorQuerySerializer(item); + } +} + +export function vectorQueryUnionDeserializer(item: any): VectorQueryUnion { + switch (item.kind) { + case "vector": + return vectorizedQueryDeserializer(item as VectorizedQuery); + + case "text": + return vectorizableTextQueryDeserializer(item as VectorizableTextQuery); + + case "imageUrl": + return vectorizableImageUrlQueryDeserializer(item as VectorizableImageUrlQuery); + + case "imageBinary": + return vectorizableImageBinaryQueryDeserializer(item as VectorizableImageBinaryQuery); + + default: + return vectorQueryDeserializer(item); + } +} + +/** The threshold used for vector queries. */ +export interface VectorThreshold { + /** Type of threshold. */ + /** The discriminator possible values: vectorSimilarity, searchScore */ + kind: VectorThresholdKind; +} + +export function vectorThresholdSerializer(item: VectorThreshold): any { + return { kind: item["kind"] }; +} + +export function vectorThresholdDeserializer(item: any): VectorThreshold { + return { + kind: item["kind"], + }; +} + +/** Alias for VectorThresholdUnion */ +export type VectorThresholdUnion = + | VectorSimilarityThreshold + | SearchScoreThreshold + | VectorThreshold; + +export function vectorThresholdUnionSerializer(item: VectorThresholdUnion): any { + switch (item.kind) { + case "vectorSimilarity": + return vectorSimilarityThresholdSerializer(item as VectorSimilarityThreshold); + + case "searchScore": + return searchScoreThresholdSerializer(item as SearchScoreThreshold); + + default: + return vectorThresholdSerializer(item); + } +} + +export function vectorThresholdUnionDeserializer(item: any): VectorThresholdUnion { + switch (item.kind) { + case "vectorSimilarity": + return vectorSimilarityThresholdDeserializer(item as VectorSimilarityThreshold); + + case "searchScore": + return searchScoreThresholdDeserializer(item as SearchScoreThreshold); + + default: + return vectorThresholdDeserializer(item); + } +} + +/** The kind of vector query being performed. */ +export enum KnownVectorThresholdKind { + /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */ + VectorSimilarity = "vectorSimilarity", + /** The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. 
+
+/**
+ * The kind of threshold used to filter vector queries. \
+ * {@link KnownVectorThresholdKind} can be used interchangeably with VectorThresholdKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **vectorSimilarity**: The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. \
+ * **searchScore**: The results of the vector query will be filtered based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score.
+ */
+export type VectorThresholdKind = string;
+
+/** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
+export interface VectorSimilarityThreshold extends VectorThreshold {
+  /** The threshold will filter based on the similarity metric value. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
+  value: number;
+  /** The kind of threshold used to filter vector queries */
+  kind: "vectorSimilarity";
+}
+
+export function vectorSimilarityThresholdSerializer(item: VectorSimilarityThreshold): any {
+  return { kind: item["kind"], value: item["value"] };
+}
+
+export function vectorSimilarityThresholdDeserializer(item: any): VectorSimilarityThreshold {
+  return {
+    kind: item["kind"],
+    value: item["value"],
+  };
+}
+
+/** The results of the vector query will be filtered based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */
+export interface SearchScoreThreshold extends VectorThreshold {
+  /** The threshold will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */
+  value: number;
+  /** The kind of threshold used to filter vector queries */
+  kind: "searchScore";
+}
+
+export function searchScoreThresholdSerializer(item: SearchScoreThreshold): any {
+  return { kind: item["kind"], value: item["value"] };
+}
+
+export function searchScoreThresholdDeserializer(item: any): SearchScoreThreshold {
+  return {
+    kind: item["kind"],
+    value: item["value"],
+  };
+}
+
+/** The kind of vector query being performed. */
+export enum KnownVectorQueryKind {
+  /** Vector query where a raw vector value is provided. */
+  Vector = "vector",
+  /** Vector query where a text value that needs to be vectorized is provided. */
+  Text = "text",
+  /** Vector query where a URL that represents an image value that needs to be vectorized is provided. */
+  ImageUrl = "imageUrl",
+  /** Vector query where a base64-encoded binary of an image that needs to be vectorized is provided. */
+  ImageBinary = "imageBinary",
+}
+
+/**
+ * The kind of vector query being performed. \
+ * {@link KnownVectorQueryKind} can be used interchangeably with VectorQueryKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **vector**: Vector query where a raw vector value is provided. \
+ * **text**: Vector query where a text value that needs to be vectorized is provided. \
+ * **imageUrl**: Vector query where a URL that represents an image value that needs to be vectorized is provided. \
+ * **imageBinary**: Vector query where a base64-encoded binary of an image that needs to be vectorized is provided.
+ */
+export type VectorQueryKind = string;
+
+/** The query parameters to use for vector search when a raw vector value is provided. */
+export interface VectorizedQuery extends VectorQuery {
+  /** The vector representation of a search query. */
+  vector: number[];
+  /** The kind of vector query being performed. */
+  kind: "vector";
+}
+
+export function vectorizedQuerySerializer(item: VectorizedQuery): any {
+  return {
+    k: item["kNearestNeighbors"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionSerializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    vector: item["vector"].map((p: any) => {
+      return p;
+    }),
+  };
+}
+
+export function vectorizedQueryDeserializer(item: any): VectorizedQuery {
+  return {
+    kNearestNeighbors: item["k"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionDeserializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    vector: item["vector"].map((p: any) => {
+      return p;
+    }),
+  };
+}
+
+/** The query parameters to use for vector search when a text value that needs to be vectorized is provided. */
+export interface VectorizableTextQuery extends VectorQuery {
+  /** The text to be vectorized to perform a vector search query. */
+  text: string;
+  /** Can be configured to let a generative model rewrite the query before sending it to be vectorized. */
+  queryRewrites?: QueryRewritesType;
+  /** The kind of vector query being performed. */
+  kind: "text";
+}
+
+export function vectorizableTextQuerySerializer(item: VectorizableTextQuery): any {
+  return {
+    k: item["kNearestNeighbors"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionSerializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    text: item["text"],
+    queryRewrites: item["queryRewrites"],
+  };
+}
+
+export function vectorizableTextQueryDeserializer(item: any): VectorizableTextQuery {
+  return {
+    kNearestNeighbors: item["k"],
+    fields: item["fields"],
+    exhaustive: item["exhaustive"],
+    oversampling: item["oversampling"],
+    weight: item["weight"],
+    threshold: !item["threshold"]
+      ? item["threshold"]
+      : vectorThresholdUnionDeserializer(item["threshold"]),
+    filterOverride: item["filterOverride"],
+    perDocumentVectorLimit: item["perDocumentVectorLimit"],
+    kind: item["kind"],
+    text: item["text"],
+    queryRewrites: item["queryRewrites"],
+  };
+}
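+
+// A minimal usage sketch: a text query that the service vectorizes at query
+// time, with generative query rewrites enabled. The field name
+// "contentVector" is hypothetical.
+function _exampleVectorizableTextQuery(): any {
+  const query: VectorizableTextQuery = {
+    kind: "text",
+    text: "walking distance to live music",
+    fields: "contentVector",
+    queryRewrites: "generative",
+  };
+  return vectorizableTextQuerySerializer(query);
+}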
item["threshold"] + : vectorThresholdUnionDeserializer(item["threshold"]), + filterOverride: item["filterOverride"], + perDocumentVectorLimit: item["perDocumentVectorLimit"], + kind: item["kind"], + text: item["text"], + queryRewrites: item["queryRewrites"], + }; +} + +/** The query parameters to use for vector search when an url that represents an image value that needs to be vectorized is provided. */ +export interface VectorizableImageUrlQuery extends VectorQuery { + /** The URL of an image to be vectorized to perform a vector search query. */ + url?: string; + /** The kind of vector query being performed. */ + kind: "imageUrl"; +} + +export function vectorizableImageUrlQuerySerializer(item: VectorizableImageUrlQuery): any { + return { + k: item["kNearestNeighbors"], + fields: item["fields"], + exhaustive: item["exhaustive"], + oversampling: item["oversampling"], + weight: item["weight"], + threshold: !item["threshold"] + ? item["threshold"] + : vectorThresholdUnionSerializer(item["threshold"]), + filterOverride: item["filterOverride"], + perDocumentVectorLimit: item["perDocumentVectorLimit"], + kind: item["kind"], + url: item["url"], + }; +} + +export function vectorizableImageUrlQueryDeserializer(item: any): VectorizableImageUrlQuery { + return { + kNearestNeighbors: item["k"], + fields: item["fields"], + exhaustive: item["exhaustive"], + oversampling: item["oversampling"], + weight: item["weight"], + threshold: !item["threshold"] + ? item["threshold"] + : vectorThresholdUnionDeserializer(item["threshold"]), + filterOverride: item["filterOverride"], + perDocumentVectorLimit: item["perDocumentVectorLimit"], + kind: item["kind"], + url: item["url"], + }; +} + +/** The query parameters to use for vector search when a base 64 encoded binary of an image that needs to be vectorized is provided. */ +export interface VectorizableImageBinaryQuery extends VectorQuery { + /** The base 64 encoded binary of an image to be vectorized to perform a vector search query. */ + base64Image?: string; + /** The kind of vector query being performed. */ + kind: "imageBinary"; +} + +export function vectorizableImageBinaryQuerySerializer(item: VectorizableImageBinaryQuery): any { + return { + k: item["kNearestNeighbors"], + fields: item["fields"], + exhaustive: item["exhaustive"], + oversampling: item["oversampling"], + weight: item["weight"], + threshold: !item["threshold"] + ? item["threshold"] + : vectorThresholdUnionSerializer(item["threshold"]), + filterOverride: item["filterOverride"], + perDocumentVectorLimit: item["perDocumentVectorLimit"], + kind: item["kind"], + base64Image: item["base64Image"], + }; +} + +export function vectorizableImageBinaryQueryDeserializer(item: any): VectorizableImageBinaryQuery { + return { + kNearestNeighbors: item["k"], + fields: item["fields"], + exhaustive: item["exhaustive"], + oversampling: item["oversampling"], + weight: item["weight"], + threshold: !item["threshold"] + ? item["threshold"] + : vectorThresholdUnionDeserializer(item["threshold"]), + filterOverride: item["filterOverride"], + perDocumentVectorLimit: item["perDocumentVectorLimit"], + kind: item["kind"], + base64Image: item["base64Image"], + }; +} + +/** Determines whether or not filters are applied before or after the vector search is performed. */ +export enum KnownVectorFilterMode { + /** The filter will be applied after the candidate set of vector results is returned. Depending on the filter selectivity, this can result in fewer results than requested by the parameter 'k'. 
+
+/** Determines whether or not filters are applied before or after the vector search is performed. */
+export enum KnownVectorFilterMode {
+  /** The filter will be applied after the candidate set of vector results is returned. Depending on the filter selectivity, this can result in fewer results than requested by the parameter 'k'. */
+  PostFilter = "postFilter",
+  /** The filter will be applied before the search query. */
+  PreFilter = "preFilter",
+  /** The filter will be applied after the global top-k candidate set of vector results is returned. */
+  StrictPostFilter = "strictPostFilter",
+}
+
+/**
+ * Determines whether or not filters are applied before or after the vector search is performed. \
+ * {@link KnownVectorFilterMode} can be used interchangeably with VectorFilterMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **postFilter**: The filter will be applied after the candidate set of vector results is returned. Depending on the filter selectivity, this can result in fewer results than requested by the parameter 'k'. \
+ * **preFilter**: The filter will be applied before the search query. \
+ * **strictPostFilter**: The filter will be applied after the global top-k candidate set of vector results is returned.
+ */
+export type VectorFilterMode = string;
+
+/** The query parameters to configure hybrid search behaviors. */
+export interface HybridSearch {
+  /** Determines the maximum number of documents to be retrieved by the text query portion of a hybrid search request. Those documents will be combined with the documents matching the vector queries to produce a single final list of results. Choosing a larger maxTextRecallSize value will allow retrieving and paging through more documents (using the top and skip parameters), at the cost of higher resource utilization and higher latency. The value needs to be between 1 and 10,000. Default is 1000. */
+  maxTextRecallSize?: number;
+  /** Determines whether the count and facets should include all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. */
+  countAndFacetMode?: HybridCountAndFacetMode;
+}
+
+export function hybridSearchSerializer(item: HybridSearch): any {
+  return {
+    maxTextRecallSize: item["maxTextRecallSize"],
+    countAndFacetMode: item["countAndFacetMode"],
+  };
+}
+
+export function hybridSearchDeserializer(item: any): HybridSearch {
+  return {
+    maxTextRecallSize: item["maxTextRecallSize"],
+    countAndFacetMode: item["countAndFacetMode"],
+  };
+}
+
+/** Determines whether the count and facets should include all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. The default value is 'countAllResults'. */
+export enum KnownHybridCountAndFacetMode {
+  /** Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. */
+  CountRetrievableResults = "countRetrievableResults",
+  /** Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window. */
+  CountAllResults = "countAllResults",
+}
+
+/**
+ * Determines whether the count and facets should include all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. The default value is 'countAllResults'. \
+ * {@link KnownHybridCountAndFacetMode} can be used interchangeably with HybridCountAndFacetMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **countRetrievableResults**: Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. \
+ * **countAllResults**: Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window.
+ */
+export type HybridCountAndFacetMode = string;
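+
+// A minimal usage sketch: hybrid search tuning that widens the text recall
+// window and counts only the retrievable window.
+function _exampleHybridSearchPayload(): any {
+  const hybrid: HybridSearch = {
+    maxTextRecallSize: 2000,
+    countAndFacetMode: "countRetrievableResults",
+  };
+  return hybridSearchSerializer(hybrid);
+}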
+
+export function searchResultArraySerializer(result: Array<SearchResult>): any[] {
+  return result.map((item) => {
+    return searchResultSerializer(item);
+  });
+}
+
+export function searchResultArrayDeserializer(result: Array<any>): any[] {
+  return result.map((item) => {
+    return searchResultDeserializer(item);
+  });
+}
+
+/** Contains a document found by a search query, plus associated metadata. */
+export interface SearchResult {
+  /** The relevance score of the document compared to other documents returned by the query. */
+  score: number;
+  /** The relevance score computed by the semantic ranker for the top search results. Search results are sorted by the RerankerScore first and then by the Score. RerankerScore is only returned for queries of type 'semantic'. */
+  rerankerScore?: number;
+  /** The relevance score computed by boosting the Reranker Score. Search results are sorted by the RerankerScore/RerankerBoostedScore based on useScoringProfileBoostedRanking in the Semantic Config. RerankerBoostedScore is only returned for queries of type 'semantic'. */
+  rerankerBoostedScore?: number;
+  /** Text fragments from the document that indicate the matching search terms, organized by each applicable field; null if hit highlighting was not enabled for the query. */
+  highlights?: Record<string, string[]>;
+  /** Captions are the most representative passages from the document relative to the search query. They are often used as a document summary. Captions are only returned for queries of type 'semantic'. */
+  captions?: QueryCaptionResult[];
+  /** Contains debugging information that can be used to further explore your search results. */
+  readonly documentDebugInfo?: DocumentDebugInfo[];
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function searchResultSerializer(item: SearchResult): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    "@search.score": item["score"],
+    "@search.rerankerScore": item["rerankerScore"],
+    "@search.rerankerBoostedScore": item["rerankerBoostedScore"],
+    "@search.highlights": item["highlights"],
+    "@search.captions": !item["captions"]
+      ? item["captions"]
+      : queryCaptionResultArraySerializer(item["captions"]),
+  };
+}
+
+export function searchResultDeserializer(item: any): SearchResult {
+  return {
+    additionalProperties: serializeRecord(item, [
+      "score",
+      "rerankerScore",
+      "rerankerBoostedScore",
+      "highlights",
+      "captions",
+      "documentDebugInfo",
+    ]),
+    score: item["@search.score"],
+    rerankerScore: item["@search.rerankerScore"],
+    rerankerBoostedScore: item["@search.rerankerBoostedScore"],
+    highlights: item["@search.highlights"],
+    captions: !item["@search.captions"]
+      ? item["@search.captions"]
+      : queryCaptionResultArrayDeserializer(item["@search.captions"]),
+    documentDebugInfo: !item["@search.documentDebugInfo"]
+      ? item["@search.documentDebugInfo"]
+      : documentDebugInfoArrayDeserializer(item["@search.documentDebugInfo"]),
+  };
+}
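+
+// A minimal deserialization sketch: the REST payload carries result metadata
+// under "@search."-prefixed properties, which map onto SearchResult fields;
+// the document's own fields are collected into additionalProperties. The
+// field "hotelName" is hypothetical.
+function _exampleSearchResultFromWire(): SearchResult {
+  const wire = {
+    "@search.score": 0.83,
+    "@search.rerankerScore": 2.1,
+    hotelName: "Fancy Stay",
+  };
+  return searchResultDeserializer(wire);
+}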
item["@search.documentDebugInfo"] + : documentDebugInfoArrayDeserializer(item["@search.documentDebugInfo"]), + }; +} + +export function queryCaptionResultArraySerializer(result: Array): any[] { + return result.map((item) => { + return queryCaptionResultSerializer(item); + }); +} + +export function queryCaptionResultArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return queryCaptionResultDeserializer(item); + }); +} + +/** Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type `semantic`. */ +export interface QueryCaptionResult { + /** A representative text passage extracted from the document most relevant to the search query. */ + text?: string; + /** Same text passage as in the Text property with highlighted phrases most relevant to the query. */ + highlights?: string; + /** Additional properties */ + additionalProperties?: Record; +} + +export function queryCaptionResultSerializer(item: QueryCaptionResult): any { + return { + ...serializeRecord(item.additionalProperties ?? {}), + text: item["text"], + highlights: item["highlights"], + }; +} + +export function queryCaptionResultDeserializer(item: any): QueryCaptionResult { + return { + additionalProperties: serializeRecord(item, ["text", "highlights"]), + text: item["text"], + highlights: item["highlights"], + }; +} + +export function documentDebugInfoArrayDeserializer(result: Array): any[] { + return result.map((item) => { + return documentDebugInfoDeserializer(item); + }); +} + +/** Contains debugging information that can be used to further explore your search results. */ +export interface DocumentDebugInfo { + /** Contains debugging information specific to semantic ranking requests. */ + readonly semantic?: SemanticDebugInfo; + /** Contains debugging information specific to vector and hybrid search. */ + readonly vectors?: VectorsDebugInfo; + /** Contains debugging information specific to vectors matched within a collection of complex types. */ + readonly innerHits?: Record; +} + +export function documentDebugInfoDeserializer(item: any): DocumentDebugInfo { + return { + semantic: !item["semantic"] + ? item["semantic"] + : semanticDebugInfoDeserializer(item["semantic"]), + vectors: !item["vectors"] ? item["vectors"] : vectorsDebugInfoDeserializer(item["vectors"]), + innerHits: !item["innerHits"] + ? item["innerHits"] + : queryResultDocumentInnerHitArrayRecordDeserializer(item["innerHits"]), + }; +} + +/** Contains debugging information specific to semantic ranking requests. */ +export interface SemanticDebugInfo { + /** The title field that was sent to the semantic enrichment process, as well as how it was used */ + readonly titleField?: QueryResultDocumentSemanticField; + /** The content fields that were sent to the semantic enrichment process, as well as how they were used */ + readonly contentFields?: QueryResultDocumentSemanticField[]; + /** The keyword fields that were sent to the semantic enrichment process, as well as how they were used */ + readonly keywordFields?: QueryResultDocumentSemanticField[]; + /** The raw concatenated strings that were sent to the semantic enrichment process. */ + readonly rerankerInput?: QueryResultDocumentRerankerInput; +} + +export function semanticDebugInfoDeserializer(item: any): SemanticDebugInfo { + return { + titleField: !item["titleField"] + ? 
item["titleField"] + : queryResultDocumentSemanticFieldDeserializer(item["titleField"]), + contentFields: !item["contentFields"] + ? item["contentFields"] + : queryResultDocumentSemanticFieldArrayDeserializer(item["contentFields"]), + keywordFields: !item["keywordFields"] + ? item["keywordFields"] + : queryResultDocumentSemanticFieldArrayDeserializer(item["keywordFields"]), + rerankerInput: !item["rerankerInput"] + ? item["rerankerInput"] + : queryResultDocumentRerankerInputDeserializer(item["rerankerInput"]), + }; +} + +/** Description of fields that were sent to the semantic enrichment process, as well as how they were used */ +export interface QueryResultDocumentSemanticField { + /** The name of the field that was sent to the semantic enrichment process */ + readonly name?: string; + /** The way the field was used for the semantic enrichment process (fully used, partially used, or unused) */ + readonly state?: SemanticFieldState; +} + +export function queryResultDocumentSemanticFieldDeserializer( + item: any, +): QueryResultDocumentSemanticField { + return { + name: item["name"], + state: item["state"], + }; +} + +/** The way the field was used for the semantic enrichment process. */ +export enum KnownSemanticFieldState { + /** The field was fully used for semantic enrichment. */ + Used = "used", + /** The field was not used for semantic enrichment. */ + Unused = "unused", + /** The field was partially used for semantic enrichment. */ + Partial = "partial", +} + +/** + * The way the field was used for the semantic enrichment process. \ + * {@link KnownSemanticFieldState} can be used interchangeably with SemanticFieldState, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **used**: The field was fully used for semantic enrichment. \ + * **unused**: The field was not used for semantic enrichment. \ + * **partial**: The field was partially used for semantic enrichment. + */ +export type SemanticFieldState = string; + +export function queryResultDocumentSemanticFieldArrayDeserializer( + result: Array, +): any[] { + return result.map((item) => { + return queryResultDocumentSemanticFieldDeserializer(item); + }); +} + +/** The raw concatenated strings that were sent to the semantic enrichment process. */ +export interface QueryResultDocumentRerankerInput { + /** The raw string for the title field that was used for semantic enrichment. */ + readonly title?: string; + /** The raw concatenated strings for the content fields that were used for semantic enrichment. */ + readonly content?: string; + /** The raw concatenated strings for the keyword fields that were used for semantic enrichment. */ + readonly keywords?: string; +} + +export function queryResultDocumentRerankerInputDeserializer( + item: any, +): QueryResultDocumentRerankerInput { + return { + title: item["title"], + content: item["content"], + keywords: item["keywords"], + }; +} + +/** "Contains debugging information specific to vector and hybrid search.") */ +export interface VectorsDebugInfo { + /** The breakdown of subscores of the document prior to the chosen result set fusion/combination method such as RRF. */ + readonly subscores?: QueryResultDocumentSubscores; +} + +export function vectorsDebugInfoDeserializer(item: any): VectorsDebugInfo { + return { + subscores: !item["subscores"] + ? 
item["subscores"] + : queryResultDocumentSubscoresDeserializer(item["subscores"]), + }; +} + +/** The breakdown of subscores between the text and vector query components of the search query for this document. Each vector query is shown as a separate object in the same order they were received. */ +export interface QueryResultDocumentSubscores { + /** The BM25 or Classic score for the text portion of the query. */ + readonly text?: TextResult; + /** The vector similarity and @search.score values for each vector query. */ + readonly vectors?: Record[]; + /** The BM25 or Classic score for the text portion of the query. */ + readonly documentBoost?: number; +} + +export function queryResultDocumentSubscoresDeserializer(item: any): QueryResultDocumentSubscores { + return { + text: !item["text"] ? item["text"] : textResultDeserializer(item["text"]), + vectors: !item["vectors"] + ? item["vectors"] + : singleVectorFieldResultRecordArrayDeserializer(item["vectors"]), + documentBoost: item["documentBoost"], + }; +} + +/** The BM25 or Classic score for the text portion of the query. */ +export interface TextResult { + /** The BM25 or Classic score for the text portion of the query. */ + readonly searchScore?: number; +} + +export function textResultDeserializer(item: any): TextResult { + return { + searchScore: item["searchScore"], + }; +} + +export function singleVectorFieldResultRecordArrayDeserializer( + result: Array>, +): any[] { + return result.map((item) => { + return singleVectorFieldResultRecordDeserializer(item); + }); +} + +export function singleVectorFieldResultRecordDeserializer( + item: Record, +): Record { + const result: Record = {}; + Object.keys(item).map((key) => { + result[key] = !item[key] ? item[key] : singleVectorFieldResultDeserializer(item[key]); + }); + return result; +} + +/** A single vector field result. Both @search.score and vector similarity values are returned. Vector similarity is related to @search.score by an equation. */ +export interface SingleVectorFieldResult { + /** The @search.score value that is calculated from the vector similarity score. This is the score that's visible in a pure single-field single-vector query. */ + readonly searchScore?: number; + /** The vector similarity score for this document. Note this is the canonical definition of similarity metric, not the 'distance' version. For example, cosine similarity instead of cosine distance. */ + readonly vectorSimilarity?: number; +} + +export function singleVectorFieldResultDeserializer(item: any): SingleVectorFieldResult { + return { + searchScore: item["searchScore"], + vectorSimilarity: item["vectorSimilarity"], + }; +} + +export function queryResultDocumentInnerHitArrayRecordDeserializer( + item: Record, +): Record> { + const result: Record = {}; + Object.keys(item).map((key) => { + result[key] = !item[key] ? item[key] : queryResultDocumentInnerHitArrayDeserializer(item[key]); + }); + return result; +} + +export function queryResultDocumentInnerHitArrayDeserializer( + result: Array, +): any[] { + return result.map((item) => { + return queryResultDocumentInnerHitDeserializer(item); + }); +} + +/** Detailed scoring information for an individual element of a complex collection. */ +export interface QueryResultDocumentInnerHit { + /** Position of this specific matching element within it's original collection. Position starts at 0. */ + readonly ordinal?: number; + /** Detailed scoring information for an individual element of a complex collection that matched a vector query. 
+
+/** Detailed scoring information for an individual element of a complex collection. */
+export interface QueryResultDocumentInnerHit {
+  /** Position of this specific matching element within its original collection. Position starts at 0. */
+  readonly ordinal?: number;
+  /** Detailed scoring information for an individual element of a complex collection that matched a vector query. */
+  readonly vectors?: Record<string, SingleVectorFieldResult>[];
+}
+
+export function queryResultDocumentInnerHitDeserializer(item: any): QueryResultDocumentInnerHit {
+  return {
+    ordinal: item["ordinal"],
+    vectors: !item["vectors"]
+      ? item["vectors"]
+      : singleVectorFieldResultRecordArrayDeserializer(item["vectors"]),
+  };
+}
+
+/** Reason that a partial response was returned for a semantic ranking request. */
+export enum KnownSemanticErrorReason {
+  /** If `semanticMaxWaitInMilliseconds` was set and the semantic processing duration exceeded that value. Only the base results were returned. */
+  MaxWaitExceeded = "maxWaitExceeded",
+  /** The request was throttled. Only the base results were returned. */
+  CapacityOverloaded = "capacityOverloaded",
+  /** At least one step of the semantic process failed. */
+  Transient = "transient",
+}
+
+/**
+ * Reason that a partial response was returned for a semantic ranking request. \
+ * {@link KnownSemanticErrorReason} can be used interchangeably with SemanticErrorReason,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **maxWaitExceeded**: If `semanticMaxWaitInMilliseconds` was set and the semantic processing duration exceeded that value. Only the base results were returned. \
+ * **capacityOverloaded**: The request was throttled. Only the base results were returned. \
+ * **transient**: At least one step of the semantic process failed.
+ */
+export type SemanticErrorReason = string;
+
+/** Type of partial response that was returned for a semantic ranking request. */
+export enum KnownSemanticSearchResultsType {
+  /** Results without any semantic enrichment or reranking. */
+  BaseResults = "baseResults",
+  /** Results have been reranked with the reranker model and will include semantic captions. They will not include any answers, answers highlights or caption highlights. */
+  RerankedResults = "rerankedResults",
+}
+
+/**
+ * Type of partial response that was returned for a semantic ranking request. \
+ * {@link KnownSemanticSearchResultsType} can be used interchangeably with SemanticSearchResultsType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **baseResults**: Results without any semantic enrichment or reranking. \
+ * **rerankedResults**: Results have been reranked with the reranker model and will include semantic captions. They will not include any answers, answers highlights or caption highlights.
+ */
+export type SemanticSearchResultsType = string;
+
+/** Type of query rewrite that was used for this request. */
+export enum KnownSemanticQueryRewritesResultType {
+  /** Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results. */
+  OriginalQueryOnly = "originalQueryOnly",
+}
+
+/**
+ * Type of query rewrite that was used for this request. \
+ * {@link KnownSemanticQueryRewritesResultType} can be used interchangeably with SemanticQueryRewritesResultType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **originalQueryOnly**: Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results.
+ */
+export type SemanticQueryRewritesResultType = string;
+
+/** A document retrieved via a document lookup operation. */
+export interface LookupDocument {
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function lookupDocumentDeserializer(item: any): LookupDocument {
+  return {
+    additionalProperties: serializeRecord(item, []),
+  };
+}
+
+/** Response containing suggestion query results from an index. */
+export interface SuggestDocumentsResult {
+  /** The sequence of results returned by the query. */
+  results: SuggestResult[];
+  /** A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not set in the request. */
+  coverage?: number;
+}
+
+export function suggestDocumentsResultDeserializer(item: any): SuggestDocumentsResult {
+  return {
+    results: suggestResultArrayDeserializer(item["value"]),
+    coverage: item["@search.coverage"],
+  };
+}
+
+export function suggestResultArraySerializer(result: Array<SuggestResult>): any[] {
+  return result.map((item) => {
+    return suggestResultSerializer(item);
+  });
+}
+
+export function suggestResultArrayDeserializer(result: Array<any>): any[] {
+  return result.map((item) => {
+    return suggestResultDeserializer(item);
+  });
+}
+
+/** A result containing a document found by a suggestion query, plus associated metadata. */
+export interface SuggestResult {
+  /** The text of the suggestion result. */
+  text: string;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function suggestResultSerializer(item: SuggestResult): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    "@search.text": item["text"],
+  };
+}
+
+export function suggestResultDeserializer(item: any): SuggestResult {
+  return {
+    additionalProperties: serializeRecord(item, ["text"]),
+    text: item["@search.text"],
+  };
+}
+
+/** Contains a batch of document write actions to send to the index. */
+export interface IndexDocumentsBatch {
+  /** The actions in the batch. */
+  actions: IndexAction[];
+}
+
+export function indexDocumentsBatchSerializer(item: IndexDocumentsBatch): any {
+  return { value: indexActionArraySerializer(item["actions"]) };
+}
+
+export function indexActionArraySerializer(result: Array<IndexAction>): any[] {
+  return result.map((item) => {
+    return indexActionSerializer(item);
+  });
+}
+
+/** Represents an index action that operates on a document. */
+export interface IndexAction {
+  /** The operation to perform on a document in an indexing batch. */
+  actionType?: IndexActionType;
+  /** Additional properties */
+  additionalProperties?: Record<string, any>;
+}
+
+export function indexActionSerializer(item: IndexAction): any {
+  return {
+    ...serializeRecord(item.additionalProperties ?? {}),
+    "@search.action": item["actionType"],
+  };
+}
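+
+// A minimal usage sketch: a batch that uploads one document and deletes
+// another. On the wire the action type is carried as "@search.action" and
+// the batch is wrapped in { value: [...] }. The key field "hotelId" is
+// hypothetical.
+function _exampleIndexBatchPayload(): any {
+  const batch: IndexDocumentsBatch = {
+    actions: [
+      { actionType: "upload", additionalProperties: { hotelId: "1", hotelName: "Fancy Stay" } },
+      { actionType: "delete", additionalProperties: { hotelId: "2" } },
+    ],
+  };
+  return indexDocumentsBatchSerializer(batch);
+}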
+
+/** The operation to perform on a document in an indexing batch. */
+export enum KnownIndexActionType {
+  /** Inserts the document into the index if it is new and updates it if it exists. All fields are replaced in the update case. */
+  Upload = "upload",
+  /** Merges the specified field values with an existing document. If the document does not exist, the merge will fail. Any field you specify in a merge will replace the existing field in the document. This also applies to collections of primitive and complex types. */
+  Merge = "merge",
+  /** Behaves like merge if a document with the given key already exists in the index. If the document does not exist, it behaves like upload with a new document. */
+  MergeOrUpload = "mergeOrUpload",
+  /** Removes the specified document from the index. Any field you specify in a delete operation other than the key field will be ignored. If you want to remove an individual field from a document, use merge instead and set the field explicitly to null. */
+  Delete = "delete",
+}
+
+/**
+ * The operation to perform on a document in an indexing batch. \
+ * {@link KnownIndexActionType} can be used interchangeably with IndexActionType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **upload**: Inserts the document into the index if it is new and updates it if it exists. All fields are replaced in the update case. \
+ * **merge**: Merges the specified field values with an existing document. If the document does not exist, the merge will fail. Any field you specify in a merge will replace the existing field in the document. This also applies to collections of primitive and complex types. \
+ * **mergeOrUpload**: Behaves like merge if a document with the given key already exists in the index. If the document does not exist, it behaves like upload with a new document. \
+ * **delete**: Removes the specified document from the index. Any field you specify in a delete operation other than the key field will be ignored. If you want to remove an individual field from a document, use merge instead and set the field explicitly to null.
+ */
+export type IndexActionType = string;
+
+/** Response containing the status of operations for all documents in the indexing request. */
+export interface IndexDocumentsResult {
+  /** The list of status information for each document in the indexing request. */
+  results: IndexingResult[];
+}
+
+export function indexDocumentsResultDeserializer(item: any): IndexDocumentsResult {
+  return {
+    results: indexingResultArrayDeserializer(item["value"]),
+  };
+}
+
+export function indexingResultArraySerializer(result: Array<IndexingResult>): any[] {
+  return result.map((item) => {
+    return indexingResultSerializer(item);
+  });
+}
+
+export function indexingResultArrayDeserializer(result: Array<any>): any[] {
+  return result.map((item) => {
+    return indexingResultDeserializer(item);
+  });
+}
+
+/** Status of an indexing operation for a single document. */
+export interface IndexingResult {
+  /** The key of a document that was in the indexing request. */
+  key: string;
+  /** The error message explaining why the indexing operation failed for the document identified by the key; null if indexing succeeded. */
+  errorMessage?: string;
+  /** A value indicating whether the indexing operation succeeded for the document identified by the key. */
+  succeeded: boolean;
+  /** The status code of the indexing operation. Possible values include: 200 for a successful update or delete, 201 for successful document creation, 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 when the service is too busy. */
+  statusCode: number;
+}
+
+export function indexingResultSerializer(item: IndexingResult): any {
+  return {
+    key: item["key"],
+    errorMessage: item["errorMessage"],
+    status: item["succeeded"],
+    statusCode: item["statusCode"],
+  };
+}
+
+export function indexingResultDeserializer(item: any): IndexingResult {
+  return {
+    key: item["key"],
+    errorMessage: item["errorMessage"],
+    succeeded: item["status"],
+    statusCode: item["statusCode"],
+  };
+}
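+
+// A minimal mapping sketch: the REST payload reports per-document status
+// under "status", which the deserializer surfaces as `succeeded`.
+function _exampleIndexingResultFromWire(): IndexingResult {
+  const wire = { key: "1", status: true, statusCode: 201 };
+  return indexingResultDeserializer(wire);
+}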
+
+/** The result of an Autocomplete query. */
+export interface AutocompleteResult {
+  /** A value indicating the percentage of the index that was considered by the autocomplete request, or null if minimumCoverage was not specified in the request. */
+  coverage?: number;
+  /** The list of returned Autocompleted items. */
+  results: AutocompleteItem[];
+}
+
+export function autocompleteResultDeserializer(item: any): AutocompleteResult {
+  return {
+    coverage: item["@search.coverage"],
+    results: autocompleteItemArrayDeserializer(item["value"]),
+  };
+}
+
+export function autocompleteItemArraySerializer(result: Array<AutocompleteItem>): any[] {
+  return result.map((item) => {
+    return autocompleteItemSerializer(item);
+  });
+}
+
+export function autocompleteItemArrayDeserializer(result: Array<any>): any[] {
+  return result.map((item) => {
+    return autocompleteItemDeserializer(item);
+  });
+}
+
+/** The result of Autocomplete requests. */
+export interface AutocompleteItem {
+  /** The completed term. */
+  text: string;
+  /** The query along with the completed term. */
+  queryPlusText: string;
+}
+
+export function autocompleteItemSerializer(item: AutocompleteItem): any {
+  return { text: item["text"], queryPlusText: item["queryPlusText"] };
+}
+
+export function autocompleteItemDeserializer(item: any): AutocompleteItem {
+  return {
+    text: item["text"],
+    queryPlusText: item["queryPlusText"],
+  };
+}
+
+/** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context in producing autocomplete terms. */
+export enum KnownAutocompleteMode {
+  /** Only one term is suggested. If the query has two terms, only the last term is completed. For example, if the input is 'washington medic', the suggested terms could include 'medicaid', 'medicare', and 'medicine'. */
+  OneTerm = "oneTerm",
+  /** Matching two-term phrases in the index will be suggested. For example, if the input is 'medic', the suggested terms could include 'medicare coverage' and 'medical assistant'. */
+  TwoTerms = "twoTerms",
+  /** Completes the last term in a query with two or more terms, where the last two terms are a phrase that exists in the index. For example, if the input is 'washington medic', the suggested terms could include 'washington medicaid' and 'washington medical'. */
+  OneTermWithContext = "oneTermWithContext",
+}
+
+/**
+ * Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context in producing autocomplete terms. \
+ * {@link KnownAutocompleteMode} can be used interchangeably with AutocompleteMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **oneTerm**: Only one term is suggested. If the query has two terms, only the last term is completed. For example, if the input is 'washington medic', the suggested terms could include 'medicaid', 'medicare', and 'medicine'. \
+ * **twoTerms**: Matching two-term phrases in the index will be suggested. For example, if the input is 'medic', the suggested terms could include 'medicare coverage' and 'medical assistant'. \
+ * **oneTermWithContext**: Completes the last term in a query with two or more terms, where the last two terms are a phrase that exists in the index. For example, if the input is 'washington medic', the suggested terms could include 'washington medicaid' and 'washington medical'.
+ */ +export type AutocompleteMode = string; diff --git a/sdk/search/search-documents/src/models/index.ts b/sdk/search/search-documents/src/models/index.ts new file mode 100644 index 000000000000..7b17a4980ee4 --- /dev/null +++ b/sdk/search/search-documents/src/models/index.ts @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { + IndexedSharePointKnowledgeSource, + IndexedSharePointKnowledgeSourceParameters, + KnowledgeSourceIngestionParameters, + AIServices, + KnownKnowledgeSourceIngestionPermissionOption, + KnowledgeSourceIngestionPermissionOption, + KnownKnowledgeSourceContentExtractionMode, + KnowledgeSourceContentExtractionMode, + IndexedOneLakeKnowledgeSource, + IndexedOneLakeKnowledgeSourceParameters, + WebKnowledgeSource, + WebKnowledgeSourceParameters, + WebKnowledgeSourceDomains, + WebKnowledgeSourceDomain, + RemoteSharePointKnowledgeSource, + RemoteSharePointKnowledgeSourceParameters, + ServiceIndexersRuntime, + IndexerRuntime, + KnownKnowledgeBaseActivityRecordType, + KnowledgeBaseActivityRecordType, + KnownKnowledgeBaseReferenceType, + KnowledgeBaseReferenceType, + KnowledgeSourceStatus, + KnownKnowledgeSourceSynchronizationStatus, + KnowledgeSourceSynchronizationStatus, + SynchronizationState, + CompletedSynchronizationState, + KnowledgeSourceStatistics, + KnownVersions, +} from "./models.js"; diff --git a/sdk/search/search-documents/src/models/models.ts b/sdk/search/search-documents/src/models/models.ts new file mode 100644 index 000000000000..b7e2111babe4 --- /dev/null +++ b/sdk/search/search-documents/src/models/models.ts @@ -0,0 +1,820 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { + searchResourceEncryptionKeySerializer, + searchResourceEncryptionKeyDeserializer, + searchIndexerDataIdentityUnionSerializer, + searchIndexerDataIdentityUnionDeserializer, + SearchIndexerDataIdentityUnion, + KnowledgeSource, + IndexingSchedule, + indexingScheduleSerializer, + indexingScheduleDeserializer, + CreatedResources, + createdResourcesSerializer, + BlobIndexerDataToExtract, + BlobIndexerImageAction, + BlobIndexerParsingMode, + MarkdownHeaderDepth, + MarkdownParsingSubmode, + BlobIndexerPDFTextRotationAlgorithm, +} from "./azure/search/documents/indexes/models.js"; + +/** + * This file contains only generated model types and their (de)serializers. + * Disable the following rules for internal models with '_' prefix and deserializers which require 'any' for raw JSON input. + */ +/* eslint-disable @typescript-eslint/naming-convention */ +/* eslint-disable @typescript-eslint/explicit-module-boundary-types */ +/** Configuration for SharePoint knowledge source. */ +export interface IndexedSharePointKnowledgeSource extends KnowledgeSource { + kind: "indexedSharePoint"; + /** The parameters for the knowledge source. */ + indexedSharePointParameters: IndexedSharePointKnowledgeSourceParameters; +} + +export function indexedSharePointKnowledgeSourceSerializer( + item: IndexedSharePointKnowledgeSource, +): any { + return { + description: item["description"], + kind: item["kind"], + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? 
item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + indexedSharePointParameters: indexedSharePointKnowledgeSourceParametersSerializer( + item["indexedSharePointParameters"], + ), + }; +} + +export function indexedSharePointKnowledgeSourceDeserializer( + item: any, +): IndexedSharePointKnowledgeSource { + return { + name: item["name"], + description: item["description"], + kind: item["kind"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + indexedSharePointParameters: indexedSharePointKnowledgeSourceParametersDeserializer( + item["indexedSharePointParameters"], + ), + }; +} + +/** Parameters for SharePoint knowledge source. */ +export interface IndexedSharePointKnowledgeSourceParameters { + /** An explicit identity to use for this knowledge source. */ + identity?: SearchIndexerDataIdentityUnion; + /** Key-based connection string or the ResourceId format if using a managed identity. */ + connectionString: string; + /** The name of the SharePoint container. */ + containerName: string; + /** Optional query to filter SharePoint content. */ + query?: string; + /** Optional ingestion parameters. */ + ingestionParameters?: KnowledgeSourceIngestionParameters; +} + +export function indexedSharePointKnowledgeSourceParametersSerializer( + item: IndexedSharePointKnowledgeSourceParameters, +): any { + return { + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionSerializer(item["identity"]), + connectionString: item["connectionString"], + containerName: item["containerName"], + query: item["query"], + ingestionParameters: !item["ingestionParameters"] + ? item["ingestionParameters"] + : knowledgeSourceIngestionParametersSerializer(item["ingestionParameters"]), + }; +} + +export function indexedSharePointKnowledgeSourceParametersDeserializer( + item: any, +): IndexedSharePointKnowledgeSourceParameters { + return { + identity: !item["identity"] + ? item["identity"] + : searchIndexerDataIdentityUnionDeserializer(item["identity"]), + connectionString: item["connectionString"], + containerName: item["containerName"], + query: item["query"], + ingestionParameters: !item["ingestionParameters"] + ? item["ingestionParameters"] + : knowledgeSourceIngestionParametersDeserializer(item["ingestionParameters"]), + }; +} + +/** Consolidates all general ingestion settings for knowledge sources. */ +export interface KnowledgeSourceIngestionParameters { + /** The schedule for ingestion. */ + ingestionSchedule?: IndexingSchedule; + /** The AI Services configuration. */ + aiServices?: AIServices; + /** The maximum number of items to extract from the source. */ + maxItemsToExtract?: number; + /** The maximum size of the document to extract. */ + maxDocumentExtractionSize?: number; + /** The data to extract from the source. */ + dataToExtract?: BlobIndexerDataToExtract; + /** The action to take on images. */ + imageAction?: BlobIndexerImageAction; + /** The parsing mode to use. */ + parsingMode?: BlobIndexerParsingMode; + /** Whether to fail on unprocessable document. */ + failOnUnprocessableDocument?: boolean; + /** Whether to fail on unsupported content type. */ + failOnUnsupportedContentType?: boolean; + /** Indexed file name extensions. */ + indexedFileNameExtensions?: string[]; + /** Excluded file name extensions. */ + excludedFileNameExtensions?: string[]; + /** Whether to index storage metadata only for oversized documents. 
*/ + indexStorageMetadataOnlyForOversizedDocuments?: boolean; + /** Delimited text delimiter. */ + delimitedTextDelimiter?: string; + /** Whether the first line contains headers. */ + firstLineContainsHeaders?: boolean; + /** Delimited text headers. */ + delimitedTextHeaders?: string; + /** The document root. */ + documentRoot?: string; + /** The markdown header depth. */ + markdownHeaderDepth?: MarkdownHeaderDepth; + /** The markdown parsing submode. */ + markdownParsingSubmode?: MarkdownParsingSubmode; + /** The PDF text rotation algorithm. */ + pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm; + /** Permission options for ingestion. */ + ingestionPermissionOptions?: KnowledgeSourceIngestionPermissionOption[]; + /** Whether to allow skillset to read file data. */ + allowSkillsetToReadFileData?: boolean; + /** Optional content extraction mode. Default is 'minimal'. */ + contentExtractionMode?: KnowledgeSourceContentExtractionMode; +} + +export function knowledgeSourceIngestionParametersSerializer( + item: KnowledgeSourceIngestionParameters, +): any { + return { + ingestionSchedule: !item["ingestionSchedule"] + ? item["ingestionSchedule"] + : indexingScheduleSerializer(item["ingestionSchedule"]), + aiServices: !item["aiServices"] ? item["aiServices"] : aiServicesSerializer(item["aiServices"]), + maxItemsToExtract: item["maxItemsToExtract"], + maxDocumentExtractionSize: item["maxDocumentExtractionSize"], + dataToExtract: item["dataToExtract"], + imageAction: item["imageAction"], + parsingMode: item["parsingMode"], + failOnUnprocessableDocument: item["failOnUnprocessableDocument"], + failOnUnsupportedContentType: item["failOnUnsupportedContentType"], + indexedFileNameExtensions: !item["indexedFileNameExtensions"] + ? item["indexedFileNameExtensions"] + : item["indexedFileNameExtensions"].map((p: any) => { + return p; + }), + excludedFileNameExtensions: !item["excludedFileNameExtensions"] + ? item["excludedFileNameExtensions"] + : item["excludedFileNameExtensions"].map((p: any) => { + return p; + }), + indexStorageMetadataOnlyForOversizedDocuments: + item["indexStorageMetadataOnlyForOversizedDocuments"], + delimitedTextDelimiter: item["delimitedTextDelimiter"], + firstLineContainsHeaders: item["firstLineContainsHeaders"], + delimitedTextHeaders: item["delimitedTextHeaders"], + documentRoot: item["documentRoot"], + markdownHeaderDepth: item["markdownHeaderDepth"], + markdownParsingSubmode: item["markdownParsingSubmode"], + pdfTextRotationAlgorithm: item["pdfTextRotationAlgorithm"], + ingestionPermissionOptions: !item["ingestionPermissionOptions"] + ? item["ingestionPermissionOptions"] + : item["ingestionPermissionOptions"].map((p: any) => { + return p; + }), + allowSkillsetToReadFileData: item["allowSkillsetToReadFileData"], + contentExtractionMode: item["contentExtractionMode"], + }; +} + +export function knowledgeSourceIngestionParametersDeserializer( + item: any, +): KnowledgeSourceIngestionParameters { + return { + ingestionSchedule: !item["ingestionSchedule"] + ? item["ingestionSchedule"] + : indexingScheduleDeserializer(item["ingestionSchedule"]), + aiServices: !item["aiServices"] + ? 
item["aiServices"] + : aiServicesDeserializer(item["aiServices"]), + maxItemsToExtract: item["maxItemsToExtract"], + maxDocumentExtractionSize: item["maxDocumentExtractionSize"], + dataToExtract: item["dataToExtract"], + imageAction: item["imageAction"], + parsingMode: item["parsingMode"], + failOnUnprocessableDocument: item["failOnUnprocessableDocument"], + failOnUnsupportedContentType: item["failOnUnsupportedContentType"], + indexedFileNameExtensions: !item["indexedFileNameExtensions"] + ? item["indexedFileNameExtensions"] + : item["indexedFileNameExtensions"].map((p: any) => { + return p; + }), + excludedFileNameExtensions: !item["excludedFileNameExtensions"] + ? item["excludedFileNameExtensions"] + : item["excludedFileNameExtensions"].map((p: any) => { + return p; + }), + indexStorageMetadataOnlyForOversizedDocuments: + item["indexStorageMetadataOnlyForOversizedDocuments"], + delimitedTextDelimiter: item["delimitedTextDelimiter"], + firstLineContainsHeaders: item["firstLineContainsHeaders"], + delimitedTextHeaders: item["delimitedTextHeaders"], + documentRoot: item["documentRoot"], + markdownHeaderDepth: item["markdownHeaderDepth"], + markdownParsingSubmode: item["markdownParsingSubmode"], + pdfTextRotationAlgorithm: item["pdfTextRotationAlgorithm"], + ingestionPermissionOptions: !item["ingestionPermissionOptions"] + ? item["ingestionPermissionOptions"] + : item["ingestionPermissionOptions"].map((p: any) => { + return p; + }), + allowSkillsetToReadFileData: item["allowSkillsetToReadFileData"], + contentExtractionMode: item["contentExtractionMode"], + }; +} + +/** Parameters for AI Services. */ +export interface AIServices { + /** The URI of the AI Services endpoint. */ + uri: string; + /** The API key for accessing AI Services. */ + apiKey?: string; +} + +export function aiServicesSerializer(item: AIServices): any { + return { uri: item["uri"], apiKey: item["apiKey"] }; +} + +export function aiServicesDeserializer(item: any): AIServices { + return { + uri: item["uri"], + apiKey: item["apiKey"], + }; +} + +/** Permission types to ingest together with document content. */ +export enum KnownKnowledgeSourceIngestionPermissionOption { + /** Ingest explicit user identifiers alongside document content. */ + UserIds = "userIds", + /** Ingest group identifiers alongside document content. */ + GroupIds = "groupIds", + /** Ingest RBAC scope information alongside document content. */ + RbacScope = "rbacScope", +} + +/** + * Permission types to ingest together with document content. \ + * {@link KnownKnowledgeSourceIngestionPermissionOption} can be used interchangeably with KnowledgeSourceIngestionPermissionOption, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **userIds**: Ingest explicit user identifiers alongside document content. \ + * **groupIds**: Ingest group identifiers alongside document content. \ + * **rbacScope**: Ingest RBAC scope information alongside document content. + */ +export type KnowledgeSourceIngestionPermissionOption = string; + +/** Optional content extraction mode. Default is 'minimal'. */ +export enum KnownKnowledgeSourceContentExtractionMode { + /** Extracts only essential metadata while deferring most content processing. */ + Minimal = "minimal", + /** Performs the full default content extraction pipeline. */ + Standard = "standard", +} + +/** + * Optional content extraction mode. Default is 'minimal'. 
\ + * {@link KnownKnowledgeSourceContentExtractionMode} can be used interchangeably with KnowledgeSourceContentExtractionMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **minimal**: Extracts only essential metadata while deferring most content processing. \ + * **standard**: Performs the full default content extraction pipeline. + */ +export type KnowledgeSourceContentExtractionMode = string; + +/** Configuration for OneLake knowledge source. */ +export interface IndexedOneLakeKnowledgeSource extends KnowledgeSource { + kind: "indexedOneLake"; + /** The parameters for the knowledge source. */ + indexedOneLakeParameters: IndexedOneLakeKnowledgeSourceParameters; +} + +export function indexedOneLakeKnowledgeSourceSerializer(item: IndexedOneLakeKnowledgeSource): any { + return { + description: item["description"], + kind: item["kind"], + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + indexedOneLakeParameters: indexedOneLakeKnowledgeSourceParametersSerializer( + item["indexedOneLakeParameters"], + ), + }; +} + +export function indexedOneLakeKnowledgeSourceDeserializer( + item: any, +): IndexedOneLakeKnowledgeSource { + return { + name: item["name"], + description: item["description"], + kind: item["kind"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + indexedOneLakeParameters: indexedOneLakeKnowledgeSourceParametersDeserializer( + item["indexedOneLakeParameters"], + ), + }; +} + +/** Parameters for OneLake knowledge source. */ +export interface IndexedOneLakeKnowledgeSourceParameters { + /** The Fabric workspace ID. */ + fabricWorkspaceId: string; + /** The lakehouse ID. */ + lakehouseId: string; + /** Optional target path within the lakehouse. */ + targetPath?: string; + /** Optional ingestion parameters. */ + ingestionParameters?: KnowledgeSourceIngestionParameters; +} + +export function indexedOneLakeKnowledgeSourceParametersSerializer( + item: IndexedOneLakeKnowledgeSourceParameters, +): any { + return { + fabricWorkspaceId: item["fabricWorkspaceId"], + lakehouseId: item["lakehouseId"], + targetPath: item["targetPath"], + ingestionParameters: !item["ingestionParameters"] + ? item["ingestionParameters"] + : knowledgeSourceIngestionParametersSerializer(item["ingestionParameters"]), + }; +} + +export function indexedOneLakeKnowledgeSourceParametersDeserializer( + item: any, +): IndexedOneLakeKnowledgeSourceParameters { + return { + fabricWorkspaceId: item["fabricWorkspaceId"], + lakehouseId: item["lakehouseId"], + targetPath: item["targetPath"], + ingestionParameters: !item["ingestionParameters"] + ? item["ingestionParameters"] + : knowledgeSourceIngestionParametersDeserializer(item["ingestionParameters"]), + }; +} + +/** Knowledge Source targeting web results. */ +export interface WebKnowledgeSource extends KnowledgeSource { + kind: "web"; + /** The parameters for the web knowledge source. */ + webParameters?: WebKnowledgeSourceParameters; +} + +export function webKnowledgeSourceSerializer(item: WebKnowledgeSource): any { + return { + description: item["description"], + kind: item["kind"], + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? 
item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + webParameters: !item["webParameters"] + ? item["webParameters"] + : webKnowledgeSourceParametersSerializer(item["webParameters"]), + }; +} + +export function webKnowledgeSourceDeserializer(item: any): WebKnowledgeSource { + return { + name: item["name"], + description: item["description"], + kind: item["kind"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + webParameters: !item["webParameters"] + ? item["webParameters"] + : webKnowledgeSourceParametersDeserializer(item["webParameters"]), + }; +} + +/** Parameters for web knowledge source. */ +export interface WebKnowledgeSourceParameters { + /** Domain allow/block configuration for web results. */ + domains?: WebKnowledgeSourceDomains; +} + +export function webKnowledgeSourceParametersSerializer(item: WebKnowledgeSourceParameters): any { + return { + domains: !item["domains"] + ? item["domains"] + : webKnowledgeSourceDomainsSerializer(item["domains"]), + }; +} + +export function webKnowledgeSourceParametersDeserializer(item: any): WebKnowledgeSourceParameters { + return { + domains: !item["domains"] + ? item["domains"] + : webKnowledgeSourceDomainsDeserializer(item["domains"]), + }; +} + +/** Domain allow/block configuration for web knowledge source. */ +export interface WebKnowledgeSourceDomains { + /** Domains that are allowed for web results. */ + allowedDomains?: WebKnowledgeSourceDomain[]; + /** Domains that are blocked from web results. */ + blockedDomains?: WebKnowledgeSourceDomain[]; +} + +export function webKnowledgeSourceDomainsSerializer(item: WebKnowledgeSourceDomains): any { + return { + allowedDomains: !item["allowedDomains"] + ? item["allowedDomains"] + : webKnowledgeSourceDomainArraySerializer(item["allowedDomains"]), + blockedDomains: !item["blockedDomains"] + ? item["blockedDomains"] + : webKnowledgeSourceDomainArraySerializer(item["blockedDomains"]), + }; +} + +export function webKnowledgeSourceDomainsDeserializer(item: any): WebKnowledgeSourceDomains { + return { + allowedDomains: !item["allowedDomains"] + ? item["allowedDomains"] + : webKnowledgeSourceDomainArrayDeserializer(item["allowedDomains"]), + blockedDomains: !item["blockedDomains"] + ? item["blockedDomains"] + : webKnowledgeSourceDomainArrayDeserializer(item["blockedDomains"]), + }; +} + +export function webKnowledgeSourceDomainArraySerializer( + result: Array, +): any[] { + return result.map((item) => { + return webKnowledgeSourceDomainSerializer(item); + }); +} + +export function webKnowledgeSourceDomainArrayDeserializer( + result: Array, +): any[] { + return result.map((item) => { + return webKnowledgeSourceDomainDeserializer(item); + }); +} + +/** Configuration for web knowledge source domain. */ +export interface WebKnowledgeSourceDomain { + /** The address of the domain. */ + address: string; + /** Whether or not to include subpages from this domain. */ + includeSubpages?: boolean; +} + +export function webKnowledgeSourceDomainSerializer(item: WebKnowledgeSourceDomain): any { + return { address: item["address"], includeSubpages: item["includeSubpages"] }; +} + +export function webKnowledgeSourceDomainDeserializer(item: any): WebKnowledgeSourceDomain { + return { + address: item["address"], + includeSubpages: item["includeSubpages"], + }; +} + +/** Configuration for remote SharePoint knowledge source. 
*/ +export interface RemoteSharePointKnowledgeSource extends KnowledgeSource { + kind: "remoteSharePoint"; + /** The parameters for the remote SharePoint knowledge source. */ + remoteSharePointParameters: RemoteSharePointKnowledgeSourceParameters; +} + +export function remoteSharePointKnowledgeSourceSerializer( + item: RemoteSharePointKnowledgeSource, +): any { + return { + description: item["description"], + kind: item["kind"], + "@odata.etag": item["eTag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeySerializer(item["encryptionKey"]), + remoteSharePointParameters: remoteSharePointKnowledgeSourceParametersSerializer( + item["remoteSharePointParameters"], + ), + }; +} + +export function remoteSharePointKnowledgeSourceDeserializer( + item: any, +): RemoteSharePointKnowledgeSource { + return { + name: item["name"], + description: item["description"], + kind: item["kind"], + eTag: item["@odata.etag"], + encryptionKey: !item["encryptionKey"] + ? item["encryptionKey"] + : searchResourceEncryptionKeyDeserializer(item["encryptionKey"]), + remoteSharePointParameters: remoteSharePointKnowledgeSourceParametersDeserializer( + item["remoteSharePointParameters"], + ), + }; +} + +/** Parameters for remote SharePoint knowledge source. */ +export interface RemoteSharePointKnowledgeSourceParameters { + /** Keyword Query Language (KQL) expression with queryable SharePoint properties and attributes to scope the retrieval before the query runs. */ + filterExpression?: string; + /** A list of metadata fields to be returned for each item in the response. Only retrievable metadata properties can be included in this list. By default, no metadata is returned. */ + resourceMetadata?: string[]; + /** Container ID for SharePoint Embedded connection. When this is null, it will use SharePoint Online. */ + containerTypeId?: string; +} + +export function remoteSharePointKnowledgeSourceParametersSerializer( + item: RemoteSharePointKnowledgeSourceParameters, +): any { + return { + filterExpression: item["filterExpression"], + resourceMetadata: !item["resourceMetadata"] + ? item["resourceMetadata"] + : item["resourceMetadata"].map((p: any) => { + return p; + }), + containerTypeId: item["containerTypeId"], + }; +} + +export function remoteSharePointKnowledgeSourceParametersDeserializer( + item: any, +): RemoteSharePointKnowledgeSourceParameters { + return { + filterExpression: item["filterExpression"], + resourceMetadata: !item["resourceMetadata"] + ? item["resourceMetadata"] + : item["resourceMetadata"].map((p: any) => { + return p; + }), + containerTypeId: item["containerTypeId"], + }; +} + +/** Represents service-level indexer runtime counters. */ +export interface ServiceIndexersRuntime { + /** Cumulative runtime of all indexers in the service from the beginningTime to endingTime, in seconds. */ + usedSeconds: number; + /** Cumulative runtime remaining for all indexers in the service from the beginningTime to endingTime, in seconds. */ + remainingSeconds?: number; + /** Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */ + beginningTime: Date; + /** End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). 
*/ + endingTime: Date; +} + +export function serviceIndexersRuntimeDeserializer(item: any): ServiceIndexersRuntime { + return { + usedSeconds: item["usedSeconds"], + remainingSeconds: item["remainingSeconds"], + beginningTime: new Date(item["beginningTime"]), + endingTime: new Date(item["endingTime"]), + }; +} + +/** Represents the indexer's cumulative runtime consumption in the service. */ +export interface IndexerRuntime { + /** Cumulative runtime of the indexer from the beginningTime to endingTime, in seconds. */ + usedSeconds: number; + /** Cumulative runtime remaining for all indexers in the service from the beginningTime to endingTime, in seconds. */ + remainingSeconds?: number; + /** Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */ + beginningTime: Date; + /** End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */ + endingTime: Date; +} + +export function indexerRuntimeDeserializer(item: any): IndexerRuntime { + return { + usedSeconds: item["usedSeconds"], + remainingSeconds: item["remainingSeconds"], + beginningTime: new Date(item["beginningTime"]), + endingTime: new Date(item["endingTime"]), + }; +} + +/** The type of activity record. */ +export enum KnownKnowledgeBaseActivityRecordType { + /** Search index retrieval activity. */ + SearchIndex = "searchIndex", + /** Azure Blob retrieval activity. */ + AzureBlob = "azureBlob", + /** Indexed SharePoint retrieval activity. */ + IndexedSharePoint = "indexedSharePoint", + /** Indexed OneLake retrieval activity. */ + IndexedOneLake = "indexedOneLake", + /** Web retrieval activity. */ + Web = "web", + /** Remote SharePoint retrieval activity. */ + RemoteSharePoint = "remoteSharePoint", + /** LLM query planning activity. */ + ModelQueryPlanning = "modelQueryPlanning", + /** LLM answer synthesis activity. */ + ModelAnswerSynthesis = "modelAnswerSynthesis", + /** Agentic reasoning activity. */ + AgenticReasoning = "agenticReasoning", +} + +/** + * The type of activity record. \ + * {@link KnownKnowledgeBaseActivityRecordType} can be used interchangeably with KnowledgeBaseActivityRecordType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **searchIndex**: Search index retrieval activity. \ + * **azureBlob**: Azure Blob retrieval activity. \ + * **indexedSharePoint**: Indexed SharePoint retrieval activity. \ + * **indexedOneLake**: Indexed OneLake retrieval activity. \ + * **web**: Web retrieval activity. \ + * **remoteSharePoint**: Remote SharePoint retrieval activity. \ + * **modelQueryPlanning**: LLM query planning activity. \ + * **modelAnswerSynthesis**: LLM answer synthesis activity. \ + * **agenticReasoning**: Agentic reasoning activity. + */ +export type KnowledgeBaseActivityRecordType = string; + +/** The type of reference. */ +export enum KnownKnowledgeBaseReferenceType { + /** Search index document reference. */ + SearchIndex = "searchIndex", + /** Azure Blob document reference. */ + AzureBlob = "azureBlob", + /** Indexed SharePoint document reference. */ + IndexedSharePoint = "indexedSharePoint", + /** Indexed OneLake document reference. */ + IndexedOneLake = "indexedOneLake", + /** Web document reference. */ + Web = "web", + /** Remote SharePoint document reference. */ + RemoteSharePoint = "remoteSharePoint", +} + +/** + * The type of reference. 
\ + * {@link KnownKnowledgeBaseReferenceType} can be used interchangeably with KnowledgeBaseReferenceType, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **searchIndex**: Search index document reference. \ + * **azureBlob**: Azure Blob document reference. \ + * **indexedSharePoint**: Indexed SharePoint document reference. \ + * **indexedOneLake**: Indexed OneLake document reference. \ + * **web**: Web document reference. \ + * **remoteSharePoint**: Remote SharePoint document reference. + */ +export type KnowledgeBaseReferenceType = string; + +/** Represents the status and synchronization history of a knowledge source. */ +export interface KnowledgeSourceStatus { + /** The current synchronization status. */ + synchronizationStatus?: KnowledgeSourceSynchronizationStatus; + /** The created resources. */ + createdResources?: CreatedResources; + /** The current synchronization state. */ + currentSynchronizationState?: SynchronizationState; + /** The last synchronization state. */ + lastSynchronizationState?: CompletedSynchronizationState; + /** The statistics for the knowledge source. */ + statistics?: KnowledgeSourceStatistics; +} + +export function knowledgeSourceStatusSerializer(item: KnowledgeSourceStatus): any { + return { + synchronizationStatus: item["synchronizationStatus"], + createdResources: !item["createdResources"] + ? item["createdResources"] + : createdResourcesSerializer(item["createdResources"]), + currentSynchronizationState: !item["currentSynchronizationState"] + ? item["currentSynchronizationState"] + : synchronizationStateSerializer(item["currentSynchronizationState"]), + lastSynchronizationState: !item["lastSynchronizationState"] + ? item["lastSynchronizationState"] + : completedSynchronizationStateSerializer(item["lastSynchronizationState"]), + statistics: !item["statistics"] + ? item["statistics"] + : knowledgeSourceStatisticsSerializer(item["statistics"]), + }; +} + +/** The current synchronization status of the knowledge source. */ +export enum KnownKnowledgeSourceSynchronizationStatus { + /** The knowledge source is being provisioned. */ + Creating = "creating", + /** The knowledge source is active and synchronization runs are occurring. */ + Active = "active", + /** The knowledge source is being deleted. */ + Deleting = "deleting", +} + +/** + * The current synchronization status of the knowledge source. \ + * {@link KnownKnowledgeSourceSynchronizationStatus} can be used interchangeably with KnowledgeSourceSynchronizationStatus, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **creating**: The knowledge source is being provisioned. \ + * **active**: The knowledge source is active and synchronization runs are occurring. \ + * **deleting**: The knowledge source is being deleted. + */ +export type KnowledgeSourceSynchronizationStatus = string; + +/** Represents the current state of an ongoing synchronization that spans multiple indexer runs. */ +export interface SynchronizationState { + /** The start time of the current synchronization. */ + startTime: Date; + /** The number of item updates successfully processed in the current synchronization. */ + itemsUpdatesProcessed: number; + /** The number of item updates that failed in the current synchronization. */ + itemsUpdatesFailed: number; + /** The number of items skipped in the current synchronization. 
*/ + itemsSkipped: number; +} + +export function synchronizationStateSerializer(item: SynchronizationState): any { + return { + startTime: item["startTime"].toISOString(), + itemsUpdatesProcessed: item["itemsUpdatesProcessed"], + itemsUpdatesFailed: item["itemsUpdatesFailed"], + itemsSkipped: item["itemsSkipped"], + }; +} + +/** Represents the completed state of the last synchronization. */ +export interface CompletedSynchronizationState { + /** The start time of the last completed synchronization. */ + startTime: Date; + /** The end time of the last completed synchronization. */ + endTime: Date; + /** The number of item updates successfully processed in the last synchronization. */ + itemsUpdatesProcessed: number; + /** The number of item updates that failed in the last synchronization. */ + itemsUpdatesFailed: number; + /** The number of items skipped in the last synchronization. */ + itemsSkipped: number; +} + +export function completedSynchronizationStateSerializer(item: CompletedSynchronizationState): any { + return { + startTime: item["startTime"].toISOString(), + endTime: item["endTime"].toISOString(), + itemsUpdatesProcessed: item["itemsUpdatesProcessed"], + itemsUpdatesFailed: item["itemsUpdatesFailed"], + itemsSkipped: item["itemsSkipped"], + }; +} + +/** Statistical information about knowledge source synchronization history. */ +export interface KnowledgeSourceStatistics { + /** Total number of synchronizations. */ + totalSynchronization: number; + /** Average synchronization duration. */ + averageSynchronizationDuration: string; + /** Average items processed per synchronization. */ + averageItemsProcessedPerSynchronization: number; +} + +export function knowledgeSourceStatisticsSerializer(item: KnowledgeSourceStatistics): any { + return { + totalSynchronization: item["totalSynchronization"], + averageSynchronizationDuration: item["averageSynchronizationDuration"], + averageItemsProcessedPerSynchronization: item["averageItemsProcessedPerSynchronization"], + }; +} + +/** The available API versions. */ +export enum KnownVersions { + /** The 2025-11-01-preview API version. */ + V20251101Preview = "2025-11-01-preview", +} diff --git a/sdk/search/search-documents/src/search/api/index.ts b/sdk/search/search-documents/src/search/api/index.ts new file mode 100644 index 000000000000..4cd0afe6f9ca --- /dev/null +++ b/sdk/search/search-documents/src/search/api/index.ts @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { + autocompletePost, + autocompleteGet, + index, + suggestPost, + suggestGet, + getDocument, + searchPost, + searchGet, + getDocumentCount, +} from "./operations.js"; +export { + AutocompletePostOptionalParams, + AutocompleteGetOptionalParams, + IndexOptionalParams, + SuggestPostOptionalParams, + SuggestGetOptionalParams, + GetDocumentOptionalParams, + SearchPostOptionalParams, + SearchGetOptionalParams, + GetDocumentCountOptionalParams, +} from "./options.js"; +export { createSearch, SearchContext, SearchClientOptionalParams } from "./searchContext.js"; diff --git a/sdk/search/search-documents/src/search/api/operations.ts b/sdk/search/search-documents/src/search/api/operations.ts new file mode 100644 index 000000000000..618f592cb639 --- /dev/null +++ b/sdk/search/search-documents/src/search/api/operations.ts @@ -0,0 +1,712 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
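Every operation in the generated module below follows the same shape: an internal `_<name>Send` helper that builds the request from a URL template and the options bag, an internal `_<name>Deserialize` helper that validates the status code and maps the wire body, and a thin public wrapper composing the two. As a hedged sketch of that composition (the `SearchContext` construction is assumed here, since `createSearch`'s parameter list is not shown in this diff):

```ts
import type { PathUncheckedResponse } from "@azure-rest/core-client";
import type { SearchContext } from "./searchContext.js";
import { _autocompletePostSend, _autocompletePostDeserialize } from "./operations.js";

declare const context: SearchContext; // assumed to be built via createSearch(...)

// The send half returns a StreamableMethod, which is awaitable as a raw response;
// the deserialize half throws a RestError on any unexpected status code.
const raw: PathUncheckedResponse = await _autocompletePostSend(context, "sea", "sg");
const parsed = await _autocompletePostDeserialize(raw);
```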
+
+import { SearchContext as Client } from "./index.js";
+import {
+  errorResponseDeserializer,
+  SearchDocumentsResult,
+  searchDocumentsResultDeserializer,
+  vectorQueryUnionArraySerializer,
+  hybridSearchSerializer,
+  LookupDocument,
+  lookupDocumentDeserializer,
+  SuggestDocumentsResult,
+  suggestDocumentsResultDeserializer,
+  IndexDocumentsBatch,
+  indexDocumentsBatchSerializer,
+  IndexDocumentsResult,
+  indexDocumentsResultDeserializer,
+  AutocompleteResult,
+  autocompleteResultDeserializer,
+} from "../../models/azure/search/documents/models.js";
+import { expandUrlTemplate } from "../../static-helpers/urlTemplate.js";
+import {
+  AutocompletePostOptionalParams,
+  AutocompleteGetOptionalParams,
+  IndexOptionalParams,
+  SuggestPostOptionalParams,
+  SuggestGetOptionalParams,
+  GetDocumentOptionalParams,
+  SearchPostOptionalParams,
+  SearchGetOptionalParams,
+  GetDocumentCountOptionalParams,
+} from "./options.js";
+import {
+  StreamableMethod,
+  PathUncheckedResponse,
+  createRestError,
+  operationOptionsToRequestParameters,
+} from "@azure-rest/core-client";
+
+export function _autocompletePostSend(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: AutocompletePostOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexes('{indexName}')/docs/search.post.autocomplete{?api%2Dversion}",
+    {
+      indexName: context.indexName,
+      "api%2Dversion": context.apiVersion,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).post({
+    ...operationOptionsToRequestParameters(options),
+    contentType: "application/json",
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...(options?.querySourceAuthorization !== undefined
+        ? {
+            "x-ms-query-source-authorization": options?.querySourceAuthorization,
+          }
+        : {}),
+      accept: "application/json",
+      ...options.requestOptions?.headers,
+    },
+    body: {
+      search: searchText,
+      autocompleteMode: options?.autocompleteMode,
+      filter: options?.filter,
+      fuzzy: options?.useFuzzyMatching,
+      highlightPostTag: options?.highlightPostTag,
+      highlightPreTag: options?.highlightPreTag,
+      minimumCoverage: options?.minimumCoverage,
+      searchFields: options?.searchFields,
+      suggesterName: suggesterName,
+      top: options?.top,
+    },
+  });
+}
+
+export async function _autocompletePostDeserialize(
+  result: PathUncheckedResponse,
+): Promise<AutocompleteResult> {
+  const expectedStatuses = ["200"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return autocompleteResultDeserializer(result.body);
+}
+
+/** Autocompletes incomplete query terms based on input text and matching terms in the index. */
+export async function autocompletePost(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: AutocompletePostOptionalParams = { requestOptions: {} },
+): Promise<AutocompleteResult> {
+  const result = await _autocompletePostSend(context, searchText, suggesterName, options);
+  return _autocompletePostDeserialize(result);
+}
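For illustration, a minimal call to the POST autocomplete operation might look like the following; the suggester name `"sg"` and the search text are assumptions for the sketch, not values from this change:

```ts
import type { SearchContext } from "./searchContext.js";
import { autocompletePost } from "./operations.js";

declare const context: SearchContext; // assumed preconfigured client context

const completions = await autocompletePost(context, "sea", "sg", {
  autocompleteMode: "twoTerms", // shingle completions, per the option docs
  useFuzzyMatching: true,
  top: 5,
});
```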
+
+export function _autocompleteGetSend(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: AutocompleteGetOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexes('{indexName}')/docs/search.autocomplete{?api%2Dversion,search,suggesterName,autocompleteMode,%24filter,fuzzy,highlightPostTag,highlightPreTag,minimumCoverage,searchFields,%24top}",
+    {
+      indexName: context.indexName,
+      "api%2Dversion": context.apiVersion,
+      search: searchText,
+      suggesterName: suggesterName,
+      autocompleteMode: options?.autocompleteMode,
+      "%24filter": options?.filter,
+      fuzzy: options?.useFuzzyMatching,
+      highlightPostTag: options?.highlightPostTag,
+      highlightPreTag: options?.highlightPreTag,
+      minimumCoverage: options?.minimumCoverage,
+      searchFields: !options?.searchFields
+        ? options?.searchFields
+        : options?.searchFields.map((p: any) => {
+            return p;
+          }),
+      "%24top": options?.top,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).get({
+    ...operationOptionsToRequestParameters(options),
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...(options?.querySourceAuthorization !== undefined
+        ? {
+            "x-ms-query-source-authorization": options?.querySourceAuthorization,
+          }
+        : {}),
+      accept: "application/json",
+      ...options.requestOptions?.headers,
+    },
+  });
+}
+
+export async function _autocompleteGetDeserialize(
+  result: PathUncheckedResponse,
+): Promise<AutocompleteResult> {
+  const expectedStatuses = ["200"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return autocompleteResultDeserializer(result.body);
+}
+
+/** Autocompletes incomplete query terms based on input text and matching terms in the index. */
+export async function autocompleteGet(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: AutocompleteGetOptionalParams = { requestOptions: {} },
+): Promise<AutocompleteResult> {
+  const result = await _autocompleteGetSend(context, searchText, suggesterName, options);
+  return _autocompleteGetDeserialize(result);
+}
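The GET variant drives everything through the URL template (the `%24`-prefixed names are the percent-encoded OData `$filter`/`$top` parameters) and therefore takes `searchFields` as an array, while the POST variant sends a JSON body and takes a comma-separated string. A small sketch of the GET form, with hypothetical field names:

```ts
// Hypothetical field names; `context` is assumed to be a configured SearchContext.
const viaGet = await autocompleteGet(context, "sea", "sg", {
  searchFields: ["hotelName", "description"],
  top: 5,
});
```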
{ + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: indexDocumentsBatchSerializer(batch), + }); +} + +export async function _indexDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "207"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return indexDocumentsResultDeserializer(result.body); +} + +/** Sends a batch of document write actions to the index. */ +export async function index( + context: Client, + batch: IndexDocumentsBatch, + options: IndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _indexSend(context, batch, options); + return _indexDeserialize(result); +} + +export function _suggestPostSend( + context: Client, + searchText: string, + suggesterName: string, + options: SuggestPostOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/docs/search.post.suggest{?api%2Dversion}", + { + indexName: context.indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: { + filter: options?.filter, + fuzzy: options?.useFuzzyMatching, + highlightPostTag: options?.highlightPostTag, + highlightPreTag: options?.highlightPreTag, + minimumCoverage: options?.minimumCoverage, + orderby: options?.orderBy, + search: searchText, + searchFields: options?.searchFields, + select: options?.select, + suggesterName: suggesterName, + top: options?.top, + }, + }); +} + +export async function _suggestPostDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return suggestDocumentsResultDeserializer(result.body); +} + +/** Suggests documents in the index that match the given partial query text. 
+
+/** Suggests documents in the index that match the given partial query text. */
+export async function suggestPost(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: SuggestPostOptionalParams = { requestOptions: {} },
+): Promise<SuggestDocumentsResult> {
+  const result = await _suggestPostSend(context, searchText, suggesterName, options);
+  return _suggestPostDeserialize(result);
+}
+
+export function _suggestGetSend(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: SuggestGetOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexes('{indexName}')/docs/search.suggest{?api%2Dversion,search,suggesterName,%24filter,fuzzy,highlightPostTag,highlightPreTag,minimumCoverage,%24orderby,searchFields,%24select,%24top}",
+    {
+      indexName: context.indexName,
+      "api%2Dversion": context.apiVersion,
+      search: searchText,
+      suggesterName: suggesterName,
+      "%24filter": options?.filter,
+      fuzzy: options?.useFuzzyMatching,
+      highlightPostTag: options?.highlightPostTag,
+      highlightPreTag: options?.highlightPreTag,
+      minimumCoverage: options?.minimumCoverage,
+      "%24orderby": !options?.orderBy
+        ? options?.orderBy
+        : options?.orderBy.map((p: any) => {
+            return p;
+          }),
+      searchFields: !options?.searchFields
+        ? options?.searchFields
+        : options?.searchFields.map((p: any) => {
+            return p;
+          }),
+      "%24select": !options?.select
+        ? options?.select
+        : options?.select.map((p: any) => {
+            return p;
+          }),
+      "%24top": options?.top,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).get({
+    ...operationOptionsToRequestParameters(options),
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...(options?.querySourceAuthorization !== undefined
+        ? {
+            "x-ms-query-source-authorization": options?.querySourceAuthorization,
+          }
+        : {}),
+      accept: "application/json",
+      ...options.requestOptions?.headers,
+    },
+  });
+}
+
+export async function _suggestGetDeserialize(
+  result: PathUncheckedResponse,
+): Promise<SuggestDocumentsResult> {
+  const expectedStatuses = ["200"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return suggestDocumentsResultDeserializer(result.body);
+}
+
+/** Suggests documents in the index that match the given partial query text. */
+export async function suggestGet(
+  context: Client,
+  searchText: string,
+  suggesterName: string,
+  options: SuggestGetOptionalParams = { requestOptions: {} },
+): Promise<SuggestDocumentsResult> {
+  const result = await _suggestGetSend(context, searchText, suggesterName, options);
+  return _suggestGetDeserialize(result);
+}
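A usage sketch for the suggest operation; the suggester name and the `select` list are assumptions for illustration:

```ts
import type { SearchContext } from "./searchContext.js";
import { suggestPost } from "./operations.js";

declare const context: SearchContext; // assumed to come from createSearch(...)

const suggestions = await suggestPost(context, "sea", "sg", {
  select: "hotelId,hotelName", // the POST path takes comma-separated lists
  useFuzzyMatching: true,
  top: 5,
});
```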
{ + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getDocumentDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return lookupDocumentDeserializer(result.body); +} + +/** Retrieves a document from the index. */ +export async function getDocument( + context: Client, + key: string, + options: GetDocumentOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getDocumentSend(context, key, options); + return _getDocumentDeserialize(result); +} + +export function _searchPostSend( + context: Client, + options: SearchPostOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/docs/search.post.search{?api%2Dversion}", + { + indexName: context.indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: { + count: options?.includeTotalCount, + facets: !options?.facets + ? options?.facets + : options?.facets.map((p: any) => { + return p; + }), + filter: options?.filter, + highlight: options?.highlightFields, + highlightPostTag: options?.highlightPostTag, + highlightPreTag: options?.highlightPreTag, + minimumCoverage: options?.minimumCoverage, + orderby: options?.orderBy, + queryType: options?.queryType, + scoringStatistics: options?.scoringStatistics, + sessionId: options?.sessionId, + scoringParameters: !options?.scoringParameters + ? options?.scoringParameters + : options?.scoringParameters.map((p: any) => { + return p; + }), + scoringProfile: options?.scoringProfile, + debug: options?.debug, + search: options?.searchText, + searchFields: options?.searchFields, + searchMode: options?.searchMode, + queryLanguage: options?.queryLanguage, + speller: options?.querySpeller, + select: options?.select, + skip: options?.skip, + top: options?.top, + semanticConfiguration: options?.semanticConfigurationName, + semanticErrorHandling: options?.semanticErrorHandling, + semanticMaxWaitInMilliseconds: options?.semanticMaxWaitInMilliseconds, + semanticQuery: options?.semanticQuery, + answers: options?.answers, + captions: options?.captions, + queryRewrites: options?.queryRewrites, + semanticFields: options?.semanticFields, + vectorQueries: !options?.vectorQueries + ? options?.vectorQueries + : vectorQueryUnionArraySerializer(options?.vectorQueries), + vectorFilterMode: options?.vectorFilterMode, + hybridSearch: !options?.hybridSearch + ? 
+
+export function _searchPostSend(
+  context: Client,
+  options: SearchPostOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexes('{indexName}')/docs/search.post.search{?api%2Dversion}",
+    {
+      indexName: context.indexName,
+      "api%2Dversion": context.apiVersion,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).post({
+    ...operationOptionsToRequestParameters(options),
+    contentType: "application/json",
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...(options?.querySourceAuthorization !== undefined
+        ? {
+            "x-ms-query-source-authorization": options?.querySourceAuthorization,
+          }
+        : {}),
+      accept: "application/json",
+      ...options.requestOptions?.headers,
+    },
+    body: {
+      count: options?.includeTotalCount,
+      facets: !options?.facets
+        ? options?.facets
+        : options?.facets.map((p: any) => {
+            return p;
+          }),
+      filter: options?.filter,
+      highlight: options?.highlightFields,
+      highlightPostTag: options?.highlightPostTag,
+      highlightPreTag: options?.highlightPreTag,
+      minimumCoverage: options?.minimumCoverage,
+      orderby: options?.orderBy,
+      queryType: options?.queryType,
+      scoringStatistics: options?.scoringStatistics,
+      sessionId: options?.sessionId,
+      scoringParameters: !options?.scoringParameters
+        ? options?.scoringParameters
+        : options?.scoringParameters.map((p: any) => {
+            return p;
+          }),
+      scoringProfile: options?.scoringProfile,
+      debug: options?.debug,
+      search: options?.searchText,
+      searchFields: options?.searchFields,
+      searchMode: options?.searchMode,
+      queryLanguage: options?.queryLanguage,
+      speller: options?.querySpeller,
+      select: options?.select,
+      skip: options?.skip,
+      top: options?.top,
+      semanticConfiguration: options?.semanticConfigurationName,
+      semanticErrorHandling: options?.semanticErrorHandling,
+      semanticMaxWaitInMilliseconds: options?.semanticMaxWaitInMilliseconds,
+      semanticQuery: options?.semanticQuery,
+      answers: options?.answers,
+      captions: options?.captions,
+      queryRewrites: options?.queryRewrites,
+      semanticFields: options?.semanticFields,
+      vectorQueries: !options?.vectorQueries
+        ? options?.vectorQueries
+        : vectorQueryUnionArraySerializer(options?.vectorQueries),
+      vectorFilterMode: options?.vectorFilterMode,
+      hybridSearch: !options?.hybridSearch
+        ? options?.hybridSearch
+        : hybridSearchSerializer(options?.hybridSearch),
+    },
+  });
+}
+
+export async function _searchPostDeserialize(
+  result: PathUncheckedResponse,
+): Promise<SearchDocumentsResult> {
+  const expectedStatuses = ["200", "206"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return searchDocumentsResultDeserializer(result.body);
+}
+
+/** Searches for documents in the index. */
+export async function searchPost(
+  context: Client,
+  options: SearchPostOptionalParams = { requestOptions: {} },
+): Promise<SearchDocumentsResult> {
+  const result = await _searchPostSend(context, options);
+  return _searchPostDeserialize(result);
+}
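A minimal search sketch exercising a few of the many options; the filter's field name is hypothetical:

```ts
import type { SearchContext } from "./searchContext.js";
import { searchPost } from "./operations.js";

declare const context: SearchContext; // assumed preconfigured

const page = await searchPost(context, {
  searchText: "beach access",
  filter: "rating ge 4", // hypothetical filterable field
  includeTotalCount: true,
  top: 10,
});
```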
{ + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _searchGetDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "206"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchDocumentsResultDeserializer(result.body); +} + +/** Searches for documents in the index. */ +export async function searchGet( + context: Client, + options: SearchGetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _searchGetSend(context, options); + return _searchGetDeserialize(result); +} + +export function _getDocumentCountSend( + context: Client, + options: GetDocumentCountOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/docs/$count{?api%2Dversion}", + { + indexName: context.indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "text/plain", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getDocumentCountDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return result.body; +} + +/** Queries the number of documents in the index. */ +export async function getDocumentCount( + context: Client, + options: GetDocumentCountOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getDocumentCountSend(context, options); + return _getDocumentCountDeserialize(result); +} diff --git a/sdk/search/search-documents/src/search/api/options.ts b/sdk/search/search-documents/src/search/api/options.ts new file mode 100644 index 000000000000..904a8e640d86 --- /dev/null +++ b/sdk/search/search-documents/src/search/api/options.ts @@ -0,0 +1,288 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { + QueryType, + ScoringStatistics, + QueryDebugMode, + SearchMode, + QueryLanguage, + QuerySpellerType, + SemanticErrorMode, + QueryAnswerType, + QueryCaptionType, + QueryRewritesType, + VectorQueryUnion, + VectorFilterMode, + HybridSearch, + AutocompleteMode, +} from "../../models/azure/search/documents/models.js"; +import { OperationOptions } from "@azure-rest/core-client"; + +/** Optional parameters. */ +export interface AutocompletePostOptionalParams extends OperationOptions { + /** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. */ + autocompleteMode?: AutocompleteMode; + /** An OData expression that filters the documents used to produce completed terms for the Autocomplete result. 
*/ + filter?: string; + /** A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will autocomplete terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and consume more resources. */ + useFuzzyMatching?: boolean; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by an autocomplete query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ + minimumCoverage?: number; + /** The comma-separated list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. */ + searchFields?: string; + /** The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. */ + top?: number; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface AutocompleteGetOptionalParams extends OperationOptions { + /** Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. */ + autocompleteMode?: AutocompleteMode; + /** An OData expression that filters the documents used to produce completed terms for the Autocomplete result. */ + filter?: string; + /** A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will find terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and consume more resources. */ + useFuzzyMatching?: boolean; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by an autocomplete query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ + minimumCoverage?: number; + /** The list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester. */ + searchFields?: string[]; + /** The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. 
*/ + top?: number; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface IndexOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface SuggestPostOptionalParams extends OperationOptions { + /** An OData expression that filters the documents considered for suggestions. */ + filter?: string; + /** A value indicating whether to use fuzzy matching for the suggestion query. Default is false. When set to true, the query will find suggestions even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and consume more resources. */ + useFuzzyMatching?: boolean; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a suggestion query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ + minimumCoverage?: number; + /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string; + /** The comma-separated list of field names to search for the specified search text. Target fields must be included in the specified suggester. */ + searchFields?: string; + /** The comma-separated list of fields to retrieve. If unspecified, only the key field will be included in the results. */ + select?: string; + /** The number of suggestions to retrieve. This must be a value between 1 and 100. The default is 5. */ + top?: number; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface SuggestGetOptionalParams extends OperationOptions { + /** An OData expression that filters the documents considered for suggestions. 
*/ + filter?: string; + /** A value indicating whether to use fuzzy matching for the suggestions query. Default is false. When set to true, the query will find terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are slower and consume more resources. */ + useFuzzyMatching?: boolean; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a suggestions query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80. */ + minimumCoverage?: number; + /** The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string[]; + /** The list of field names to search for the specified search text. Target fields must be included in the specified suggester. */ + searchFields?: string[]; + /** The list of fields to retrieve. If unspecified, only the key field will be included in the results. */ + select?: string[]; + /** The number of suggestions to retrieve. The value must be a number between 1 and 100. The default is 5. */ + top?: number; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface GetDocumentOptionalParams extends OperationOptions { + /** List of field names to retrieve for the document; Any field not retrieved will be missing from the returned document. */ + selectedFields?: string[]; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface SearchPostOptionalParams extends OperationOptions { + /** A value that specifies whether to fetch the total count of results. Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation. */ + includeTotalCount?: boolean; + /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. */ + facets?: string[]; + /** The OData $filter expression to apply to the search query. 
*/ + filter?: string; + /** The comma-separated list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */ + highlightFields?: string; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */ + minimumCoverage?: number; + /** The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string; + /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */ + queryType?: QueryType; + /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. */ + scoringStatistics?: ScoringStatistics; + /** A value to be used to create a sticky session, which can help getting more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */ + sessionId?: string; + /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). */ + scoringParameters?: string[]; + /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ + scoringProfile?: string; + /** Enables a debugging tool that can be used to further explore your reranked results. */ + debug?: QueryDebugMode; + /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */ + searchText?: string; + /** The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ + searchFields?: string; + /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. 
 */
+  searchMode?: SearchMode;
+  /** A value that specifies the language of the search query. */
+  queryLanguage?: QueryLanguage;
+  /** A value that specifies the type of the speller to use to spell-correct individual search query terms. */
+  querySpeller?: QuerySpellerType;
+  /** The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. */
+  select?: string;
+  /** The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead. */
+  skip?: number;
+  /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */
+  top?: number;
+  /** The name of a semantic configuration that will be used when processing documents for queries of type semantic. */
+  semanticConfigurationName?: string;
+  /** Allows the user to choose whether a semantic call should fail completely (default / current behavior), or to return partial results. */
+  semanticErrorHandling?: SemanticErrorMode;
+  /** Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. */
+  semanticMaxWaitInMilliseconds?: number;
+  /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. It is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */
+  semanticQuery?: string;
+  /** A value that specifies whether answers should be returned as part of the search response. */
+  answers?: QueryAnswerType;
+  /** A value that specifies whether captions should be returned as part of the search response. */
+  captions?: QueryCaptionType;
+  /** A value that specifies whether query rewrites should be generated to augment the search query. */
+  queryRewrites?: QueryRewritesType;
+  /** The comma-separated list of field names used for semantic ranking. */
+  semanticFields?: string;
+  /** The query parameters for vector and hybrid search queries. */
+  vectorQueries?: VectorQueryUnion[];
+  /** Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter' for new indexes. */
+  vectorFilterMode?: VectorFilterMode;
+  /** The query parameters to configure hybrid search behaviors. */
+  hybridSearch?: HybridSearch;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+  /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */
+  querySourceAuthorization?: string;
+}
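As a typed sketch of how these options compose for a semantic query (the configuration name is hypothetical; the string values are known values of the extensible enums documented above):

```ts
import type { SearchPostOptionalParams } from "./options.js";

const semanticOptions: SearchPostOptionalParams = {
  queryType: "semantic",
  semanticConfigurationName: "my-semantic-config", // hypothetical
  answers: "extractive",
  captions: "extractive",
  semanticErrorHandling: "partial",
  semanticMaxWaitInMilliseconds: 2000,
};
```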
*/ + includeTotalResultCount?: boolean; + /** The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs. */ + facets?: string[]; + /** The OData $filter expression to apply to the search query. */ + filter?: string; + /** The list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. */ + highlightFields?: string[]; + /** A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>. */ + highlightPostTag?: string; + /** A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>. */ + highlightPreTag?: string; + /** A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100. */ + minimumCoverage?: number; + /** The list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed by asc to indicate ascending, or desc to indicate descending. The default is ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses. */ + orderBy?: string[]; + /** A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. */ + queryType?: QueryType; + /** The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be "mylocation--122.2,44.8" (without the quotes). */ + scoringParameters?: string[]; + /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ + scoringProfile?: string; + /** The list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ + searchFields?: string[]; + /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. */ + searchMode?: SearchMode; + /** A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. */ + scoringStatistics?: ScoringStatistics; + /** A value to be used to create a sticky session, which can help to get more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionId values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. */ + sessionId?: string; + /** The list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included.
*/ + select?: string[]; + /** The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use $skip due to this limitation, consider using $orderby on a totally-ordered key and $filter with a range query instead. */ + skip?: number; + /** The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. */ + top?: number; + /** The name of the semantic configuration that lists which fields should be used for semantic ranking, captions, highlights, and answers. */ + semanticConfiguration?: string; + /** Allows the user to choose whether a semantic call should fail completely, or to return partial results (default). */ + semanticErrorHandling?: SemanticErrorMode; + /** Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. */ + semanticMaxWaitInMilliseconds?: number; + /** This parameter is only valid if the query type is `semantic`. If set, the query returns answers extracted from key passages in the highest ranked documents. The number of answers returned can be configured by appending the pipe character `|` followed by the `count-` option after the answers parameter value, such as `extractive|count-3`. Default count is 1. The confidence threshold can be configured by appending the pipe character `|` followed by the `threshold-` option after the answers parameter value, such as `extractive|threshold-0.9`. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. */ + answers?: QueryAnswerType; + /** This parameter is only valid if the query type is `semantic`. If set, the query returns captions extracted from key passages in the highest ranked documents. When Captions is set to `extractive`, highlighting is enabled by default, and can be configured by appending the pipe character `|` followed by the `highlight-` option, such as `extractive|highlight-true`. Defaults to `None`. The maximum character length of captions can be configured by appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. */ + captions?: QueryCaptionType; + /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. It is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */ + semanticQuery?: string; + /** When QueryRewrites is set to `generative`, the query terms are sent to a generative model which will produce 10 (default) rewrites to help increase the recall of the request. The requested count can be configured by appending the pipe character `|` followed by the `count-` option, such as `generative|count-3`. Defaults to `None`. This parameter is only valid if the query type is `semantic`. */ + queryRewrites?: QueryRewritesType; + /** Enables a debugging tool that can be used to further explore your search results. */ + debug?: QueryDebugMode; + /** The language of the query. */ + queryLanguage?: QueryLanguage; + /** Improve search recall by spell-correcting individual search query terms.
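Known values are 'none' and 'lexicon'; with 'lexicon', a misspelled term such as "hotle" can be corrected to "hotel" before matching.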
*/ + speller?: QuerySpellerType; + /** The list of field names used for semantic ranking. */ + semanticFields?: string[]; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} + +/** Optional parameters. */ +export interface GetDocumentCountOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; + /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */ + querySourceAuthorization?: string; +} diff --git a/sdk/search/search-documents/src/search/api/searchContext.ts b/sdk/search/search-documents/src/search/api/searchContext.ts new file mode 100644 index 000000000000..fbcc99814371 --- /dev/null +++ b/sdk/search/search-documents/src/search/api/searchContext.ts @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { logger } from "../../logger.js"; +import { KnownVersions } from "../../models/models.js"; +import { Client, ClientOptions, getClient } from "@azure-rest/core-client"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; + +export interface SearchContext extends Client { + /** The API version to use for this operation. Known values of {@link KnownVersions} that the service accepts. */ + apiVersion: string; + /** The name of the index. */ + indexName: string; +} + +/** Optional parameters for the client. */ +export interface SearchClientOptionalParams extends ClientOptions { + /** The API version to use for this operation. Known values of {@link KnownVersions} that the service accepts. */ + apiVersion?: string; +} + +export function createSearch( + endpointParam: string, + credential: KeyCredential | TokenCredential, + indexName: string, + options: SearchClientOptionalParams = {}, +): SearchContext { + const endpointUrl = options.endpoint ?? String(endpointParam); + const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix; + const userAgentInfo = `azsdk-js-search-documents/12.3.0-beta.1`; + const userAgentPrefix = prefixFromOptions + ? `${prefixFromOptions} azsdk-js-api ${userAgentInfo}` + : `azsdk-js-api ${userAgentInfo}`; + const { apiVersion: _, ...updatedOptions } = { + ...options, + userAgentOptions: { userAgentPrefix }, + loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info }, + credentials: { + scopes: options.credentials?.scopes ?? ["https://search.azure.com/.default"], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", + }, + }; + const clientContext = getClient(endpointUrl, credential, updatedOptions); + clientContext.pipeline.removePolicy({ name: "ApiVersionPolicy" }); + const apiVersion = options.apiVersion ?? "2025-11-01-preview"; + clientContext.pipeline.addPolicy({ + name: "ClientApiVersionPolicy", + sendRequest: (req, next) => { + // Use the api-version already present in the request URL directly; + // append the client-level one only when the request does not specify one + const url = new URL(req.url); + if (!url.searchParams.get("api-version")) { + req.url = `${req.url}${ + Array.from(url.searchParams.keys()).length > 0 ? "&" : "?"
+ }api-version=${apiVersion}`; + } + + return next(req); + }, + }); + return { ...clientContext, apiVersion, indexName } as SearchContext; +} diff --git a/sdk/search/search-documents/src/search/index.ts b/sdk/search/search-documents/src/search/index.ts new file mode 100644 index 000000000000..5c33e50cb38b --- /dev/null +++ b/sdk/search/search-documents/src/search/index.ts @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { SearchClient } from "./searchClient.js"; +export { + AutocompletePostOptionalParams, + AutocompleteGetOptionalParams, + IndexOptionalParams, + SuggestPostOptionalParams, + SuggestGetOptionalParams, + GetDocumentOptionalParams, + SearchPostOptionalParams, + SearchGetOptionalParams, + GetDocumentCountOptionalParams, + SearchContext, + SearchClientOptionalParams, +} from "./api/index.js"; diff --git a/sdk/search/search-documents/src/search/searchClient.ts b/sdk/search/search-documents/src/search/searchClient.ts new file mode 100644 index 000000000000..e0d915db2cc6 --- /dev/null +++ b/sdk/search/search-documents/src/search/searchClient.ts @@ -0,0 +1,134 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { createSearch, SearchContext, SearchClientOptionalParams } from "./api/index.js"; +import { + SearchDocumentsResult, + LookupDocument, + SuggestDocumentsResult, + IndexDocumentsBatch, + IndexDocumentsResult, + AutocompleteResult, +} from "../models/azure/search/documents/models.js"; +import { + autocompletePost, + autocompleteGet, + index, + suggestPost, + suggestGet, + getDocument, + searchPost, + searchGet, + getDocumentCount, +} from "./api/operations.js"; +import { + AutocompletePostOptionalParams, + AutocompleteGetOptionalParams, + IndexOptionalParams, + SuggestPostOptionalParams, + SuggestGetOptionalParams, + GetDocumentOptionalParams, + SearchPostOptionalParams, + SearchGetOptionalParams, + GetDocumentCountOptionalParams, +} from "./api/options.js"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; +import { Pipeline } from "@azure/core-rest-pipeline"; + +export { SearchClientOptionalParams } from "./api/searchContext.js"; + +export class SearchClient { + private _client: SearchContext; + /** The pipeline used by this client to make requests */ + public readonly pipeline: Pipeline; + + constructor( + endpointParam: string, + credential: KeyCredential | TokenCredential, + indexName: string, + options: SearchClientOptionalParams = {}, + ) { + const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix; + const userAgentPrefix = prefixFromOptions + ? `${prefixFromOptions} azsdk-js-client` + : `azsdk-js-client`; + this._client = createSearch(endpointParam, credential, indexName, { + ...options, + userAgentOptions: { userAgentPrefix }, + }); + this.pipeline = this._client.pipeline; + } + + /** Autocompletes incomplete query terms based on input text and matching terms in the index. */ + autocompletePost( + searchText: string, + suggesterName: string, + options: AutocompletePostOptionalParams = { requestOptions: {} }, + ): Promise { + return autocompletePost(this._client, searchText, suggesterName, options); + } + + /** Autocompletes incomplete query terms based on input text and matching terms in the index. 
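This GET variant passes its parameters on the query string; autocompletePost sends them in a JSON request body.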
*/ + autocompleteGet( + searchText: string, + suggesterName: string, + options: AutocompleteGetOptionalParams = { requestOptions: {} }, + ): Promise { + return autocompleteGet(this._client, searchText, suggesterName, options); + } + + /** Sends a batch of document write actions to the index. */ + index( + batch: IndexDocumentsBatch, + options: IndexOptionalParams = { requestOptions: {} }, + ): Promise { + return index(this._client, batch, options); + } + + /** Suggests documents in the index that match the given partial query text. */ + suggestPost( + searchText: string, + suggesterName: string, + options: SuggestPostOptionalParams = { requestOptions: {} }, + ): Promise { + return suggestPost(this._client, searchText, suggesterName, options); + } + + /** Suggests documents in the index that match the given partial query text. */ + suggestGet( + searchText: string, + suggesterName: string, + options: SuggestGetOptionalParams = { requestOptions: {} }, + ): Promise { + return suggestGet(this._client, searchText, suggesterName, options); + } + + /** Retrieves a document from the index. */ + getDocument( + key: string, + options: GetDocumentOptionalParams = { requestOptions: {} }, + ): Promise { + return getDocument(this._client, key, options); + } + + /** Searches for documents in the index. */ + searchPost( + options: SearchPostOptionalParams = { requestOptions: {} }, + ): Promise { + return searchPost(this._client, options); + } + + /** Searches for documents in the index. */ + searchGet( + options: SearchGetOptionalParams = { requestOptions: {} }, + ): Promise { + return searchGet(this._client, options); + } + + /** Queries the number of documents in the index. */ + getDocumentCount( + options: GetDocumentCountOptionalParams = { requestOptions: {} }, + ): Promise { + return getDocumentCount(this._client, options); + } +} diff --git a/sdk/search/search-documents/src/searchClient.ts b/sdk/search/search-documents/src/searchClient.ts index 332011dc0d41..10e766d7079b 100644 --- a/sdk/search/search-documents/src/searchClient.ts +++ b/sdk/search/search-documents/src/searchClient.ts @@ -5,23 +5,23 @@ import type { KeyCredential, TokenCredential } from "@azure/core-auth"; import { isTokenCredential } from "@azure/core-auth"; -import type { InternalClientPipelineOptions } from "@azure/core-client"; -import type { ExtendedCommonClientOptions } from "@azure/core-http-compat"; -import type { Pipeline } from "@azure/core-rest-pipeline"; -import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline"; +import { + bearerTokenAuthenticationPolicy, + bearerTokenAuthenticationPolicyName, + type Pipeline, +} from "@azure/core-rest-pipeline"; import { decode, encode } from "./base64.js"; import type { - AutocompleteRequest, AutocompleteResult, IndexDocumentsResult, QueryAnswerType as BaseAnswers, QueryCaptionType as BaseCaptions, QueryRewritesType as GeneratedQueryRewrites, SearchRequest as GeneratedSearchRequest, - SuggestRequest, VectorQueryUnion as GeneratedVectorQuery, -} from "./generated/data/models/index.js"; -import { SearchClient as GeneratedClient } from "./generated/data/searchClient.js"; +} from "./models/azure/search/documents/index.js"; +import type { SearchClientOptionalParams } from "./search/searchClient.js"; +import { SearchClient as GeneratedClient } from "./search/searchClient.js"; import { IndexDocumentsBatch } from "./indexDocumentsBatch.js"; import type { AutocompleteOptions, @@ -58,12 +58,14 @@ import { KnownSearchAudience } from "./searchAudience.js"; import type { 
IndexDocumentsClient } from "./searchIndexingBufferedSender.js"; import { deserialize, serialize } from "./serialization.js"; import * as utils from "./serviceUtils.js"; -import { createSpan } from "./tracing.js"; +import { createSpan, tracingClient } from "./tracing.js"; +import type { ClientOptions, OperationOptions } from "@azure-rest/core-client"; +import type { GetDocumentOptionalParams, SuggestPostOptionalParams } from "./search/index.js"; /** * Client options used to configure AI Search API requests. */ -export interface SearchClientOptions extends ExtendedCommonClientOptions { +export interface SearchClientOptions extends ClientOptions { /** * The API version to use when communicating with the service. * @deprecated use {@link serviceVersion} instead @@ -176,7 +178,7 @@ export class SearchClient implements IndexDocumentsClient this.endpoint = endpoint; this.indexName = indexName; - const internalClientPipelineOptions: InternalClientPipelineOptions = { + const internalClientPipelineOptions: SearchClientOptionalParams = { ...options, ...{ loggingOptions: { @@ -199,18 +201,20 @@ export class SearchClient implements IndexDocumentsClient this.client = new GeneratedClient( this.endpoint, + credential, this.indexName, - this.serviceVersion, internalClientPipelineOptions, ); this.pipeline = this.client.pipeline; + // TODO: consider leaving the policy in-place instead of removing and re-adding + this.pipeline.removePolicy({ name: bearerTokenAuthenticationPolicyName }); + if (isTokenCredential(credential)) { const scope: string = options.audience ? `${options.audience}/.default` : `${KnownSearchAudience.AzurePublicCloud}/.default`; - this.client.pipeline.addPolicy( bearerTokenAuthenticationPolicy({ credential, scopes: scope }), ); @@ -227,29 +231,13 @@ export class SearchClient implements IndexDocumentsClient */ // eslint-disable-next-line @azure/azure-sdk/ts-naming-options public async getDocumentsCount(options: CountDocumentsOptions = {}): Promise { - const { span, updatedOptions } = createSpan("SearchClient-getDocumentsCount", options); - try { - let documentsCount: number = 0; - await this.client.documents.count({ - ...updatedOptions, - onResponse: (rawResponse, flatResponse) => { - documentsCount = Number(rawResponse.bodyAsText); - if (updatedOptions.onResponse) { - updatedOptions.onResponse(rawResponse, flatResponse); - } - }, - }); - - return documentsCount; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchClient-getDocumentsCount", + options, + async (updatedOptions) => { + return this.client.getDocumentCount(updatedOptions); + }, + ); } /** @@ -289,35 +277,26 @@ export class SearchClient implements IndexDocumentsClient options: AutocompleteOptions = {}, ): Promise { const { searchFields, ...nonFieldOptions } = options; - const fullOptions: AutocompleteRequest = { - searchText: searchText, - suggesterName: suggesterName, + const fullOptions = { searchFields: this.convertSearchFields(searchFields), ...nonFieldOptions, }; - if (!fullOptions.searchText) { + if (!searchText) { throw new RangeError("searchText must be provided."); } - if (!fullOptions.suggesterName) { + if (!suggesterName) { throw new RangeError("suggesterName must be provided."); } - const { span, updatedOptions } = createSpan("SearchClient-autocomplete", options); - - try { - const result = await this.client.documents.autocompletePost(fullOptions, updatedOptions); - return result; - } catch (e: any) { 
- span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchClient-autocomplete", + fullOptions, + async (updatedOptions) => { + return this.client.autocompletePost(searchText, suggesterName, updatedOptions); + }, + ); } private async searchDocuments>( @@ -348,7 +327,7 @@ export class SearchClient implements IndexDocumentsClient } = semanticSearchOptions ?? {}; const { queries, filterMode, ...restVectorOptions } = vectorSearchOptions ?? {}; - const fullOptions: GeneratedSearchRequest = { + const fullOptions: GeneratedSearchRequest & OperationOptions = { ...restSemanticOptions, ...restVectorOptions, ...restOptions, @@ -357,7 +336,7 @@ export class SearchClient implements IndexDocumentsClient semanticFields: this.convertSemanticFields(semanticFields), select: this.convertSelect(select) || "*", orderBy: this.convertOrderBy(orderBy), - includeTotalResultCount: includeTotalCount, + includeTotalCount, vectorQueries: queries?.map(this.convertVectorQuery.bind(this)), answers: this.convertQueryAnswers(answers), captions: this.convertQueryCaptions(captions), @@ -369,51 +348,42 @@ export class SearchClient implements IndexDocumentsClient hybridSearch: hybridSearch, }; - const { span, updatedOptions } = createSpan("SearchClient-searchDocuments", options); - - try { - const result = await this.client.documents.searchPost( - { - ...fullOptions, + return tracingClient.withSpan( + "SearchClient-searchDocuments", + fullOptions, + async (updatedOptions) => { + const result = await this.client.searchPost({ + ...updatedOptions, searchText: searchText, - }, - updatedOptions, - ); + }); + + const { + results, + nextLink, + nextPageParameters: resultNextPageParameters, + semanticPartialResponseReason: semanticErrorReason, + semanticPartialResponseType: semanticSearchResultsType, + ...restResult + } = result as typeof result & { + semanticPartialResponseReason: SemanticErrorReason | undefined; + semanticPartialResponseType: SemanticSearchResultsType | undefined; + }; - const { - results, - nextLink, - nextPageParameters: resultNextPageParameters, - semanticPartialResponseReason: semanticErrorReason, - semanticPartialResponseType: semanticSearchResultsType, - ...restResult - } = result as typeof result & { - semanticPartialResponseReason: SemanticErrorReason | undefined; - semanticPartialResponseType: SemanticSearchResultsType | undefined; - }; + const modifiedResults = utils.generatedSearchResultToPublicSearchResult( + results, + ); - const modifiedResults = utils.generatedSearchResultToPublicSearchResult( - results, - ); - - const converted: SearchDocumentsPageResult = { - ...restResult, - results: modifiedResults, - semanticErrorReason, - semanticSearchResultsType, - continuationToken: this.encodeContinuationToken(nextLink, resultNextPageParameters), - }; + const converted: SearchDocumentsPageResult = { + ...restResult, + results: modifiedResults, + semanticErrorReason, + semanticSearchResultsType, + continuationToken: this.encodeContinuationToken(nextLink, resultNextPageParameters), + }; - return deserialize>(converted); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return deserialize>(converted); + }, + ); } private async *listSearchResultsPage>( @@ -511,26 +481,15 @@ export class SearchClient implements IndexDocumentsClient */ public async search>( searchText?: string, - options?: SearchOptions, + options: SearchOptions = {}, ): Promise> 
{ - const { span, updatedOptions } = createSpan("SearchClient-search", options); - - try { + return tracingClient.withSpan("SearchClient-search", options, async (updatedOptions) => { const pageResult = await this.searchDocuments(searchText, updatedOptions); - return { ...pageResult, results: this.listSearchResults(pageResult, searchText, updatedOptions), }; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + }); } /** @@ -572,27 +531,23 @@ export class SearchClient implements IndexDocumentsClient options: SuggestOptions = {}, ): Promise> { const { select, searchFields, orderBy, ...nonFieldOptions } = options; - const fullOptions: SuggestRequest = { - searchText: searchText, - suggesterName: suggesterName, + const fullOptions: SuggestPostOptionalParams = { searchFields: this.convertSearchFields(searchFields), select: this.convertSelect(select), orderBy: this.convertOrderBy(orderBy), ...nonFieldOptions, }; - if (!fullOptions.searchText) { + if (!searchText) { throw new RangeError("searchText must be provided."); } - if (!fullOptions.suggesterName) { + if (!suggesterName) { throw new RangeError("suggesterName must be provided."); } - const { span, updatedOptions } = createSpan("SearchClient-suggest", options); - - try { - const result = await this.client.documents.suggestPost(fullOptions, updatedOptions); + return tracingClient.withSpan("SearchClient-suggest", fullOptions, async (updatedOptions) => { + const result = await this.client.suggestPost(searchText, suggesterName, updatedOptions); const modifiedResult = utils.generatedSuggestDocumentsResultToPublicSuggestDocumentsResult< TModel, @@ -600,15 +555,7 @@ export class SearchClient implements IndexDocumentsClient >(result); return deserialize>(modifiedResult); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + }); } /** @@ -620,23 +567,17 @@ export class SearchClient implements IndexDocumentsClient key: string, options: GetDocumentOptions = {}, ): Promise> { - const { span, updatedOptions } = createSpan("SearchClient-getDocument", options); - - try { - const result = await this.client.documents.get(key, { - ...updatedOptions, - selectedFields: updatedOptions.selectedFields as string[] | undefined, - }); - return deserialize>(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchClient-getDocument", + options as OperationOptions, + async (updatedOptions: GetDocumentOptionalParams) => { + const result = await this.client.getDocument(key, { + ...updatedOptions, + selectedFields: updatedOptions.selectedFields as string[] | undefined, + }); + return deserialize>(result); + }, + ); } /** @@ -654,34 +595,29 @@ export class SearchClient implements IndexDocumentsClient batch: IndexDocumentsBatch, options: IndexDocumentsOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchClient-indexDocuments", options); - try { - let status: number = 0; - const result = await this.client.documents.index( - { actions: serialize(batch.actions) }, - { - ...updatedOptions, - onResponse: (rawResponse, flatResponse) => { - status = rawResponse.status; - if (updatedOptions.onResponse) { - updatedOptions.onResponse(rawResponse, flatResponse); - } + return tracingClient.withSpan( + "SearchClient-indexDocuments", + options, + async (updatedOptions) => { + let status = 0; 
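+ // Capture the raw HTTP status via the onResponse callback below; a 207 means at least one action in the batch failed, which is surfaced when throwOnAnyFailure is set.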
+ const result = await this.client.index( + { actions: serialize(batch.actions) }, + { + ...updatedOptions, + onResponse: (rawResponse, flatResponse) => { + status = rawResponse.status; + if (updatedOptions.onResponse) { + updatedOptions.onResponse(rawResponse, flatResponse); + } + }, }, - }, - ); - if (options.throwOnAnyFailure && status === 207) { - throw result; - } - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + ); + if (options.throwOnAnyFailure && status === 207) { + throw result; + } + return result; + }, + ); } /** @@ -693,22 +629,16 @@ export class SearchClient implements IndexDocumentsClient documents: TModel[], options: UploadDocumentsOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchClient-uploadDocuments", options); - - const batch = new IndexDocumentsBatch(); - batch.upload(documents); + return tracingClient.withSpan( + "SearchClient-uploadDocuments", + options, + async (updatedOptions) => { + const batch = new IndexDocumentsBatch(); + batch.upload(documents); - try { - return await this.indexDocuments(batch, updatedOptions); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return this.indexDocuments(batch, updatedOptions); + }, + ); } /** @@ -753,22 +683,15 @@ export class SearchClient implements IndexDocumentsClient documents: TModel[], options: MergeOrUploadDocumentsOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchClient-mergeDocuments", options); - - const batch = new IndexDocumentsBatch(); - batch.mergeOrUpload(documents); - - try { - return await this.indexDocuments(batch, updatedOptions); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchClient-mergeOrUploadDocuments", + options, + async (updatedOptions) => { + const batch = new IndexDocumentsBatch(); + batch.mergeOrUpload(documents); + return this.indexDocuments(batch, updatedOptions); + }, + ); } /** @@ -798,26 +721,20 @@ export class SearchClient implements IndexDocumentsClient keyValuesOrOptions?: string[] | DeleteDocumentsOptions, options: DeleteDocumentsOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchClient-deleteDocuments", options); - - const batch = new IndexDocumentsBatch(); - if (typeof keyNameOrDocuments === "string") { - batch.delete(keyNameOrDocuments, keyValuesOrOptions as string[]); - } else { - batch.delete(keyNameOrDocuments as TModel[]); - } - - try { - return await this.indexDocuments(batch, updatedOptions); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchClient-deleteDocuments", + options, + async (updatedOptions) => { + const batch = new IndexDocumentsBatch(); + if (typeof keyNameOrDocuments === "string") { + batch.delete(keyNameOrDocuments, keyValuesOrOptions as string[]); + } else { + batch.delete(keyNameOrDocuments as TModel[]); + } + + return this.indexDocuments(batch, updatedOptions); + }, + ); } private encodeContinuationToken( diff --git a/sdk/search/search-documents/src/searchIndex/api/index.ts b/sdk/search/search-documents/src/searchIndex/api/index.ts new file mode 100644 index 000000000000..fc698a4544e2 --- /dev/null +++ b/sdk/search/search-documents/src/searchIndex/api/index.ts @@ 
-0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +export { + listIndexStatsSummary, + getServiceStatistics, + createKnowledgeSource, + listKnowledgeSources, + getKnowledgeSource, + deleteKnowledgeSource, + createOrUpdateKnowledgeSource, + createKnowledgeBase, + listKnowledgeBases, + getKnowledgeBase, + deleteKnowledgeBase, + createOrUpdateKnowledgeBase, + createAlias, + listAliases, + getAlias, + deleteAlias, + createOrUpdateAlias, + analyzeText, + getIndexStatistics, + createIndex, + listIndexes, + getIndex, + deleteIndex, + createOrUpdateIndex, + createSynonymMap, + getSynonymMaps, + getSynonymMap, + deleteSynonymMap, + createOrUpdateSynonymMap, +} from "./operations.js"; +export { + ListIndexStatsSummaryOptionalParams, + GetServiceStatisticsOptionalParams, + CreateKnowledgeSourceOptionalParams, + ListKnowledgeSourcesOptionalParams, + GetKnowledgeSourceOptionalParams, + DeleteKnowledgeSourceOptionalParams, + CreateOrUpdateKnowledgeSourceOptionalParams, + CreateKnowledgeBaseOptionalParams, + ListKnowledgeBasesOptionalParams, + GetKnowledgeBaseOptionalParams, + DeleteKnowledgeBaseOptionalParams, + CreateOrUpdateKnowledgeBaseOptionalParams, + CreateAliasOptionalParams, + ListAliasesOptionalParams, + GetAliasOptionalParams, + DeleteAliasOptionalParams, + CreateOrUpdateAliasOptionalParams, + AnalyzeTextOptionalParams, + GetIndexStatisticsOptionalParams, + CreateIndexOptionalParams, + ListIndexesOptionalParams, + GetIndexOptionalParams, + DeleteIndexOptionalParams, + CreateOrUpdateIndexOptionalParams, + CreateSynonymMapOptionalParams, + GetSynonymMapsOptionalParams, + GetSynonymMapOptionalParams, + DeleteSynonymMapOptionalParams, + CreateOrUpdateSynonymMapOptionalParams, +} from "./options.js"; +export { + createSearchIndex, + SearchIndexContext, + SearchIndexClientOptionalParams, +} from "./searchIndexContext.js"; diff --git a/sdk/search/search-documents/src/searchIndex/api/operations.ts b/sdk/search/search-documents/src/searchIndex/api/operations.ts new file mode 100644 index 000000000000..4debe3256e22 --- /dev/null +++ b/sdk/search/search-documents/src/searchIndex/api/operations.ts @@ -0,0 +1,1612 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
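+ +// Every operation below follows the same generated pattern: a _<name>Send helper builds the REST request, a _<name>Deserialize helper validates the status code and maps the wire format, and the exported function chains the two.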
+ +import { SearchIndexContext as Client } from "./index.js"; +import { + SynonymMap, + synonymMapSerializer, + synonymMapDeserializer, + ListSynonymMapsResult, + listSynonymMapsResultDeserializer, + SearchIndex, + searchIndexSerializer, + searchIndexDeserializer, + _ListIndexesResult, + _listIndexesResultDeserializer, + GetIndexStatisticsResult, + getIndexStatisticsResultDeserializer, + AnalyzeTextOptions, + analyzeTextOptionsSerializer, + AnalyzeResult, + analyzeResultDeserializer, + SearchAlias, + searchAliasSerializer, + searchAliasDeserializer, + _ListAliasesResult, + _listAliasesResultDeserializer, + KnowledgeBase, + knowledgeBaseSerializer, + knowledgeBaseDeserializer, + _ListKnowledgeBasesResult, + _listKnowledgeBasesResultDeserializer, + knowledgeSourceUnionSerializer, + knowledgeSourceUnionDeserializer, + KnowledgeSourceUnion, + _ListKnowledgeSourcesResult, + _listKnowledgeSourcesResultDeserializer, + SearchServiceStatistics, + searchServiceStatisticsDeserializer, + _ListIndexStatsSummary, + _listIndexStatsSummaryDeserializer, + IndexStatisticsSummary, +} from "../../models/azure/search/documents/indexes/models.js"; +import { errorResponseDeserializer } from "../../models/azure/search/documents/models.js"; +import { + PagedAsyncIterableIterator, + buildPagedAsyncIterator, +} from "../../static-helpers/pagingHelpers.js"; +import { expandUrlTemplate } from "../../static-helpers/urlTemplate.js"; +import { + ListIndexStatsSummaryOptionalParams, + GetServiceStatisticsOptionalParams, + CreateKnowledgeSourceOptionalParams, + ListKnowledgeSourcesOptionalParams, + GetKnowledgeSourceOptionalParams, + DeleteKnowledgeSourceOptionalParams, + CreateOrUpdateKnowledgeSourceOptionalParams, + CreateKnowledgeBaseOptionalParams, + ListKnowledgeBasesOptionalParams, + GetKnowledgeBaseOptionalParams, + DeleteKnowledgeBaseOptionalParams, + CreateOrUpdateKnowledgeBaseOptionalParams, + CreateAliasOptionalParams, + ListAliasesOptionalParams, + GetAliasOptionalParams, + DeleteAliasOptionalParams, + CreateOrUpdateAliasOptionalParams, + AnalyzeTextOptionalParams, + GetIndexStatisticsOptionalParams, + CreateIndexOptionalParams, + ListIndexesOptionalParams, + GetIndexOptionalParams, + DeleteIndexOptionalParams, + CreateOrUpdateIndexOptionalParams, + CreateSynonymMapOptionalParams, + GetSynonymMapsOptionalParams, + GetSynonymMapOptionalParams, + DeleteSynonymMapOptionalParams, + CreateOrUpdateSynonymMapOptionalParams, +} from "./options.js"; +import { + StreamableMethod, + PathUncheckedResponse, + createRestError, + operationOptionsToRequestParameters, +} from "@azure-rest/core-client"; + +export function _listIndexStatsSummarySend( + context: Client, + options: ListIndexStatsSummaryOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexstats{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listIndexStatsSummaryDeserialize( + result: PathUncheckedResponse, +): Promise<_ListIndexStatsSummary> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listIndexStatsSummaryDeserializer(result.body); +} + +/** Retrieves a summary of statistics for all indexes in the search service. */ +export function listIndexStatsSummary( + context: Client, + options: ListIndexStatsSummaryOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listIndexStatsSummarySend(context, options), + _listIndexStatsSummaryDeserialize, + ["200"], + { itemName: "IndexesStatistics" }, + ); +} + +export function _getServiceStatisticsSend( + context: Client, + options: GetServiceStatisticsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/servicestats{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getServiceStatisticsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchServiceStatisticsDeserializer(result.body); +} + +/** Gets service level statistics for a search service. */ +export async function getServiceStatistics( + context: Client, + options: GetServiceStatisticsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getServiceStatisticsSend(context, options); + return _getServiceStatisticsDeserialize(result); +} + +export function _createKnowledgeSourceSend( + context: Client, + knowledgeSource: KnowledgeSourceUnion, + options: CreateKnowledgeSourceOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeSourceUnionSerializer(knowledgeSource), + }); +} + +export async function _createKnowledgeSourceDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeSourceUnionDeserializer(result.body); +} + +/** Creates a new knowledge source. 
*/ +export async function createKnowledgeSource( + context: Client, + knowledgeSource: KnowledgeSourceUnion, + options: CreateKnowledgeSourceOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createKnowledgeSourceSend(context, knowledgeSource, options); + return _createKnowledgeSourceDeserialize(result); +} + +export function _listKnowledgeSourcesSend( + context: Client, + options: ListKnowledgeSourcesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listKnowledgeSourcesDeserialize( + result: PathUncheckedResponse, +): Promise<_ListKnowledgeSourcesResult> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listKnowledgeSourcesResultDeserializer(result.body); +} + +/** Lists all knowledge sources available for a search service. */ +export function listKnowledgeSources( + context: Client, + options: ListKnowledgeSourcesOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listKnowledgeSourcesSend(context, options), + _listKnowledgeSourcesDeserialize, + ["200"], + { itemName: "value" }, + ); +} + +export function _getKnowledgeSourceSend( + context: Client, + sourceName: string, + options: GetKnowledgeSourceOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources('{sourceName}'){?api%2Dversion}", + { + sourceName: sourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getKnowledgeSourceDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeSourceUnionDeserializer(result.body); +} + +/** Retrieves a knowledge source definition. 
*/ +export async function getKnowledgeSource( + context: Client, + sourceName: string, + options: GetKnowledgeSourceOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getKnowledgeSourceSend(context, sourceName, options); + return _getKnowledgeSourceDeserialize(result); +} + +export function _deleteKnowledgeSourceSend( + context: Client, + sourceName: string, + options: DeleteKnowledgeSourceOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources('{sourceName}'){?api%2Dversion}", + { + sourceName: sourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteKnowledgeSourceDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes an existing knowledge source. */ +export async function deleteKnowledgeSource( + context: Client, + sourceName: string, + options: DeleteKnowledgeSourceOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteKnowledgeSourceSend(context, sourceName, options); + return _deleteKnowledgeSourceDeserialize(result); +} + +export function _createOrUpdateKnowledgeSourceSend( + context: Client, + knowledgeSource: KnowledgeSourceUnion, + sourceName: string, + options: CreateOrUpdateKnowledgeSourceOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgesources('{sourceName}'){?api%2Dversion}", + { + sourceName: sourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeSourceUnionSerializer(knowledgeSource), + }); +} + +export async function _createOrUpdateKnowledgeSourceDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeSourceUnionDeserializer(result.body); +} + +/** Creates a new knowledge source or updates a knowledge source if it already exists.
*/ +export async function createOrUpdateKnowledgeSource( + context: Client, + knowledgeSource: KnowledgeSourceUnion, + sourceName: string, + options: CreateOrUpdateKnowledgeSourceOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateKnowledgeSourceSend( + context, + knowledgeSource, + sourceName, + options, + ); + return _createOrUpdateKnowledgeSourceDeserialize(result); +} + +export function _createKnowledgeBaseSend( + context: Client, + knowledgeBase: KnowledgeBase, + options: CreateKnowledgeBaseOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeBaseSerializer(knowledgeBase), + }); +} + +export async function _createKnowledgeBaseDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeBaseDeserializer(result.body); +} + +/** Creates a new knowledge base. */ +export async function createKnowledgeBase( + context: Client, + knowledgeBase: KnowledgeBase, + options: CreateKnowledgeBaseOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createKnowledgeBaseSend(context, knowledgeBase, options); + return _createKnowledgeBaseDeserialize(result); +} + +export function _listKnowledgeBasesSend( + context: Client, + options: ListKnowledgeBasesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listKnowledgeBasesDeserialize( + result: PathUncheckedResponse, +): Promise<_ListKnowledgeBasesResult> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listKnowledgeBasesResultDeserializer(result.body); +} + +/** Lists all knowledge bases available for a search service. 
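Results are surfaced incrementally through a PagedAsyncIterableIterator.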
*/ +export function listKnowledgeBases( + context: Client, + options: ListKnowledgeBasesOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listKnowledgeBasesSend(context, options), + _listKnowledgeBasesDeserialize, + ["200"], + { itemName: "value" }, + ); +} + +export function _getKnowledgeBaseSend( + context: Client, + knowledgeBaseName: string, + options: GetKnowledgeBaseOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases('{knowledgeBaseName}'){?api%2Dversion}", + { + knowledgeBaseName: knowledgeBaseName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getKnowledgeBaseDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeBaseDeserializer(result.body); +} + +/** Retrieves a knowledge base definition. */ +export async function getKnowledgeBase( + context: Client, + knowledgeBaseName: string, + options: GetKnowledgeBaseOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getKnowledgeBaseSend(context, knowledgeBaseName, options); + return _getKnowledgeBaseDeserialize(result); +} + +export function _deleteKnowledgeBaseSend( + context: Client, + knowledgeBaseName: string, + options: DeleteKnowledgeBaseOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases('{knowledgeBaseName}'){?api%2Dversion}", + { + knowledgeBaseName: knowledgeBaseName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteKnowledgeBaseDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a knowledge base. 
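A 404 from the service is accepted as success, so deleting a knowledge base that no longer exists does not throw.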
*/ +export async function deleteKnowledgeBase( + context: Client, + knowledgeBaseName: string, + options: DeleteKnowledgeBaseOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteKnowledgeBaseSend(context, knowledgeBaseName, options); + return _deleteKnowledgeBaseDeserialize(result); +} + +export function _createOrUpdateKnowledgeBaseSend( + context: Client, + knowledgeBase: KnowledgeBase, + knowledgeBaseName: string, + options: CreateOrUpdateKnowledgeBaseOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/knowledgebases('{knowledgeBaseName}'){?api%2Dversion}", + { + knowledgeBaseName: knowledgeBaseName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: knowledgeBaseSerializer(knowledgeBase), + }); +} + +export async function _createOrUpdateKnowledgeBaseDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return knowledgeBaseDeserializer(result.body); +} + +/** Creates a new knowledge base or updates a knowledge base if it already exists. */ +export async function createOrUpdateKnowledgeBase( + context: Client, + knowledgeBase: KnowledgeBase, + knowledgeBaseName: string, + options: CreateOrUpdateKnowledgeBaseOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateKnowledgeBaseSend( + context, + knowledgeBase, + knowledgeBaseName, + options, + ); + return _createOrUpdateKnowledgeBaseDeserialize(result); +} + +export function _createAliasSend( + context: Client, + alias: SearchAlias, + options: CreateAliasOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchAliasSerializer(alias), + }); +} + +export async function _createAliasDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchAliasDeserializer(result.body); +} + +/** Creates a new search alias. 
*/ +export async function createAlias( + context: Client, + alias: SearchAlias, + options: CreateAliasOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createAliasSend(context, alias, options); + return _createAliasDeserialize(result); +} + +export function _listAliasesSend( + context: Client, + options: ListAliasesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listAliasesDeserialize( + result: PathUncheckedResponse, +): Promise<_ListAliasesResult> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listAliasesResultDeserializer(result.body); +} + +/** Lists all aliases available for a search service. */ +export function listAliases( + context: Client, + options: ListAliasesOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listAliasesSend(context, options), + _listAliasesDeserialize, + ["200"], + { itemName: "aliases" }, + ); +} + +export function _getAliasSend( + context: Client, + aliasName: string, + options: GetAliasOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases('{aliasName}'){?api%2Dversion}", + { + aliasName: aliasName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getAliasDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchAliasDeserializer(result.body); +} + +/** Retrieves an alias definition. */ +export async function getAlias( + context: Client, + aliasName: string, + options: GetAliasOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getAliasSend(context, aliasName, options); + return _getAliasDeserialize(result); +} + +export function _deleteAliasSend( + context: Client, + aliasName: string, + options: DeleteAliasOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases('{aliasName}'){?api%2Dversion}", + { + aliasName: aliasName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? 
{ "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteAliasDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a search alias and its associated mapping to an index. This operation is permanent, with no recovery option. The mapped index is untouched by this operation. */ +export async function deleteAlias( + context: Client, + aliasName: string, + options: DeleteAliasOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteAliasSend(context, aliasName, options); + return _deleteAliasDeserialize(result); +} + +export function _createOrUpdateAliasSend( + context: Client, + alias: SearchAlias, + aliasName: string, + options: CreateOrUpdateAliasOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/aliases('{aliasName}'){?api%2Dversion}", + { + aliasName: aliasName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchAliasSerializer(alias), + }); +} + +export async function _createOrUpdateAliasDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchAliasDeserializer(result.body); +} + +/** Creates a new search alias or updates an alias if it already exists. */ +export async function createOrUpdateAlias( + context: Client, + alias: SearchAlias, + aliasName: string, + options: CreateOrUpdateAliasOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateAliasSend(context, alias, aliasName, options); + return _createOrUpdateAliasDeserialize(result); +} + +export function _analyzeTextSend( + context: Client, + request: AnalyzeTextOptions, + indexName: string, + options: AnalyzeTextOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/search.analyze{?api%2Dversion}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: analyzeTextOptionsSerializer(request), + }); +} + +export async function _analyzeTextDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return analyzeResultDeserializer(result.body); +} + +/** Shows how an analyzer breaks text into tokens. */ +export async function analyzeText( + context: Client, + request: AnalyzeTextOptions, + indexName: string, + options: AnalyzeTextOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _analyzeTextSend(context, request, indexName, options); + return _analyzeTextDeserialize(result); +} + +export function _getIndexStatisticsSend( + context: Client, + indexName: string, + options: GetIndexStatisticsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}')/search.stats{?api%2Dversion}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexStatisticsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return getIndexStatisticsResultDeserializer(result.body); +} + +/** Returns statistics for the given index, including a document count and storage usage. */ +export async function getIndexStatistics( + context: Client, + indexName: string, + options: GetIndexStatisticsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexStatisticsSend(context, indexName, options); + return _getIndexStatisticsDeserialize(result); +} + +export function _createIndexSend( + context: Client, + index: SearchIndex, + options: CreateIndexOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexSerializer(index), + }); +} + +export async function _createIndexDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexDeserializer(result.body); +} + +/** Creates a new search index. */ +export async function createIndex( + context: Client, + index: SearchIndex, + options: CreateIndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createIndexSend(context, index, options); + return _createIndexDeserialize(result); +} + +export function _listIndexesSend( + context: Client, + options: ListIndexesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _listIndexesDeserialize( + result: PathUncheckedResponse, +): Promise<_ListIndexesResult> { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return _listIndexesResultDeserializer(result.body); +} + +/** Lists all indexes available for a search service. */ +export function listIndexes( + context: Client, + options: ListIndexesOptionalParams = { requestOptions: {} }, +): PagedAsyncIterableIterator { + return buildPagedAsyncIterator( + context, + () => _listIndexesSend(context, options), + _listIndexesDeserialize, + ["200"], + { itemName: "indexes" }, + ); +} + +export function _getIndexSend( + context: Client, + indexName: string, + options: GetIndexOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}'){?api%2Dversion}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexDeserializer(result.body); +} + +/** Retrieves an index definition. 
*/ +export async function getIndex( + context: Client, + indexName: string, + options: GetIndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexSend(context, indexName, options); + return _getIndexDeserialize(result); +} + +export function _deleteIndexSend( + context: Client, + indexName: string, + options: DeleteIndexOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}'){?api%2Dversion}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? { + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteIndexDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a search index and all the documents it contains. This operation is permanent, with no recovery option. Make sure you have a master copy of your index definition, data ingestion code, and a backup of the primary data source in case you need to re-build the index. */ +export async function deleteIndex( + context: Client, + indexName: string, + options: DeleteIndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteIndexSend(context, indexName, options); + return _deleteIndexDeserialize(result); +} + +export function _createOrUpdateIndexSend( + context: Client, + index: SearchIndex, + indexName: string, + options: CreateOrUpdateIndexOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexes('{indexName}'){?api%2Dversion,allowIndexDowntime}", + { + indexName: indexName, + "api%2Dversion": context.apiVersion, + allowIndexDowntime: options?.allowIndexDowntime, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...(options?.querySourceAuthorization !== undefined + ? 
{ + "x-ms-query-source-authorization": options?.querySourceAuthorization, + } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexSerializer(index), + }); +} + +export async function _createOrUpdateIndexDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexDeserializer(result.body); +} + +/** Creates a new search index or updates an index if it already exists. */ +export async function createOrUpdateIndex( + context: Client, + index: SearchIndex, + indexName: string, + options: CreateOrUpdateIndexOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateIndexSend(context, index, indexName, options); + return _createOrUpdateIndexDeserialize(result); +} + +export function _createSynonymMapSend( + context: Client, + synonymMap: SynonymMap, + options: CreateSynonymMapOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: synonymMapSerializer(synonymMap), + }); +} + +export async function _createSynonymMapDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return synonymMapDeserializer(result.body); +} + +/** Creates a new synonym map. */ +export async function createSynonymMap( + context: Client, + synonymMap: SynonymMap, + options: CreateSynonymMapOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createSynonymMapSend(context, synonymMap, options); + return _createSynonymMapDeserialize(result); +} + +export function _getSynonymMapsSend( + context: Client, + options: GetSynonymMapsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getSynonymMapsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return listSynonymMapsResultDeserializer(result.body); +} + +/** Lists all synonym maps available for a search service. 
*/ +export async function getSynonymMaps( + context: Client, + options: GetSynonymMapsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getSynonymMapsSend(context, options); + return _getSynonymMapsDeserialize(result); +} + +export function _getSynonymMapSend( + context: Client, + synonymMapName: string, + options: GetSynonymMapOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps('{synonymMapName}'){?api%2Dversion}", + { + synonymMapName: synonymMapName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getSynonymMapDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return synonymMapDeserializer(result.body); +} + +/** Retrieves a synonym map definition. */ +export async function getSynonymMap( + context: Client, + synonymMapName: string, + options: GetSynonymMapOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getSynonymMapSend(context, synonymMapName, options); + return _getSynonymMapDeserialize(result); +} + +export function _deleteSynonymMapSend( + context: Client, + synonymMapName: string, + options: DeleteSynonymMapOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps('{synonymMapName}'){?api%2Dversion}", + { + synonymMapName: synonymMapName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteSynonymMapDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a synonym map. 
*/ +export async function deleteSynonymMap( + context: Client, + synonymMapName: string, + options: DeleteSynonymMapOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteSynonymMapSend(context, synonymMapName, options); + return _deleteSynonymMapDeserialize(result); +} + +export function _createOrUpdateSynonymMapSend( + context: Client, + synonymMap: SynonymMap, + synonymMapName: string, + options: CreateOrUpdateSynonymMapOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/synonymmaps('{synonymMapName}'){?api%2Dversion}", + { + synonymMapName: synonymMapName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: synonymMapSerializer(synonymMap), + }); +} + +export async function _createOrUpdateSynonymMapDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return synonymMapDeserializer(result.body); +} + +/** Creates a new synonym map or updates a synonym map if it already exists. */ +export async function createOrUpdateSynonymMap( + context: Client, + synonymMap: SynonymMap, + synonymMapName: string, + options: CreateOrUpdateSynonymMapOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateSynonymMapSend(context, synonymMap, synonymMapName, options); + return _createOrUpdateSynonymMapDeserialize(result); +} diff --git a/sdk/search/search-documents/src/searchIndex/api/options.ts b/sdk/search/search-documents/src/searchIndex/api/options.ts new file mode 100644 index 000000000000..d1ea6adfa624 --- /dev/null +++ b/sdk/search/search-documents/src/searchIndex/api/options.ts @@ -0,0 +1,234 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { OperationOptions } from "@azure-rest/core-client"; + +/** Optional parameters. */ +export interface ListIndexStatsSummaryOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetServiceStatisticsOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateKnowledgeSourceOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ListKnowledgeSourcesOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. 
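For orientation, a hypothetical sketch of driving one of these operations directly with a client context (illustrative only and not part of the patch; the createSearchIndex factory is added in searchIndexContext.ts below, and the endpoint, key, and relative import paths are assumptions):

import { AzureKeyCredential } from "@azure/core-auth";
import { createSearchIndex } from "./searchIndexContext.js";
import { getSynonymMaps } from "./operations.js";

async function main(): Promise<void> {
  // Placeholder endpoint and admin key.
  const context = createSearchIndex(
    "https://<service-name>.search.windows.net",
    new AzureKeyCredential("<admin-key>"),
  );
  // Every operation takes the context as its first argument; getSynonymMaps
  // resolves a ListSynonymMapsResult whose synonymMaps array carries names.
  const result = await getSynonymMaps(context);
  console.log(result.synonymMaps.map((sm) => sm.name));
}

main().catch(console.error);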
diff --git a/sdk/search/search-documents/src/searchIndex/api/options.ts b/sdk/search/search-documents/src/searchIndex/api/options.ts
new file mode 100644
index 000000000000..d1ea6adfa624
--- /dev/null
+++ b/sdk/search/search-documents/src/searchIndex/api/options.ts
@@ -0,0 +1,234 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+import { OperationOptions } from "@azure-rest/core-client";
+
+/** Optional parameters. */
+export interface ListIndexStatsSummaryOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface GetServiceStatisticsOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface CreateKnowledgeSourceOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface ListKnowledgeSourcesOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface GetKnowledgeSourceOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface DeleteKnowledgeSourceOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface CreateOrUpdateKnowledgeSourceOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface CreateKnowledgeBaseOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface ListKnowledgeBasesOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface GetKnowledgeBaseOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface DeleteKnowledgeBaseOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface CreateOrUpdateKnowledgeBaseOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface CreateAliasOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface ListAliasesOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface GetAliasOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface DeleteAliasOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface CreateOrUpdateAliasOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface AnalyzeTextOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+  /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */
+  querySourceAuthorization?: string;
+}
+
+/** Optional parameters. */
+export interface GetIndexStatisticsOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+  /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */
+  querySourceAuthorization?: string;
+}
+
+/** Optional parameters. */
+export interface CreateIndexOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface ListIndexesOptionalParams extends OperationOptions {
+  /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
+  select?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface GetIndexOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+  /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */
+  querySourceAuthorization?: string;
+}
+
+/** Optional parameters. */
+export interface DeleteIndexOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+  /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */
+  querySourceAuthorization?: string;
+}
+
+/** Optional parameters. */
+export interface CreateOrUpdateIndexOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. */
+  allowIndexDowntime?: boolean;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+  /** Token identifying the user for which the query is being executed. This token is used to enforce security restrictions on documents. */
+  querySourceAuthorization?: string;
+}
+
+/** Optional parameters. */
+export interface CreateSynonymMapOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface GetSynonymMapsOptionalParams extends OperationOptions {
+  /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
+  select?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface GetSynonymMapOptionalParams extends OperationOptions {
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface DeleteSynonymMapOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
+
+/** Optional parameters. */
+export interface CreateOrUpdateSynonymMapOptionalParams extends OperationOptions {
+  /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+  ifMatch?: string;
+  /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+  ifNoneMatch?: string;
+  /** An opaque, globally-unique, client-generated string identifier for the request. */
+  clientRequestId?: string;
+}
diff --git a/sdk/search/search-documents/src/searchIndex/api/searchIndexContext.ts b/sdk/search/search-documents/src/searchIndex/api/searchIndexContext.ts
new file mode 100644
index 000000000000..a44aa460b06d
--- /dev/null
+++ b/sdk/search/search-documents/src/searchIndex/api/searchIndexContext.ts
@@ -0,0 +1,61 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+import { logger } from "../../logger.js";
+import { KnownVersions } from "../../models/models.js";
+import { Client, ClientOptions, getClient } from "@azure-rest/core-client";
+import { KeyCredential, TokenCredential } from "@azure/core-auth";
+
+export interface SearchIndexContext extends Client {
+  /** The API version to use for this operation. */
+  /** Known values of {@link KnownVersions} that the service accepts. */
+  apiVersion: string;
+}
+
+/** Optional parameters for the client. */
+export interface SearchIndexClientOptionalParams extends ClientOptions {
+  /** The API version to use for this operation. */
+  /** Known values of {@link KnownVersions} that the service accepts. */
+  apiVersion?: string;
+}
+
+export function createSearchIndex(
+  endpointParam: string,
+  credential: KeyCredential | TokenCredential,
+  options: SearchIndexClientOptionalParams = {},
+): SearchIndexContext {
+  const endpointUrl = options.endpoint ?? String(endpointParam);
+  const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix;
+  const userAgentInfo = `azsdk-js-search-documents/12.3.0-beta.1`;
+  const userAgentPrefix = prefixFromOptions
+    ? `${prefixFromOptions} azsdk-js-api ${userAgentInfo}`
+    : `azsdk-js-api ${userAgentInfo}`;
+  const { apiVersion: _, ...updatedOptions } = {
+    ...options,
+    userAgentOptions: { userAgentPrefix },
+    loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info },
+    credentials: {
+      scopes: options.credentials?.scopes ?? ["https://search.azure.com/.default"],
+      apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key",
+    },
+  };
+  const clientContext = getClient(endpointUrl, credential, updatedOptions);
+  clientContext.pipeline.removePolicy({ name: "ApiVersionPolicy" });
+  const apiVersion = options.apiVersion ?? "2025-11-01-preview";
+  clientContext.pipeline.addPolicy({
+    name: "ClientApiVersionPolicy",
+    sendRequest: (req, next) => {
+      // Use the apiVersion defined in request url directly
+      // Append one if there is no apiVersion and we have one at client options
+      const url = new URL(req.url);
+      if (!url.searchParams.get("api-version")) {
+        req.url = `${req.url}${
+          Array.from(url.searchParams.keys()).length > 0 ? "&" : "?"
+        }api-version=${apiVersion}`;
+      }
+
+      return next(req);
+    },
+  });
+  return { ...clientContext, apiVersion } as SearchIndexContext;
+}
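A minimal sketch of creating this context on its own, assuming a token credential (illustrative only; the search scope and the "2025-11-01-preview" fallback both come from createSearchIndex above, while the endpoint is a placeholder):

import { DefaultAzureCredential } from "@azure/identity";
import { createSearchIndex } from "./searchIndexContext.js";

// Token credentials default to the https://search.azure.com/.default scope;
// key credentials are sent in the api-key header instead.
const context = createSearchIndex(
  "https://<service-name>.search.windows.net",
  new DefaultAzureCredential(),
  { apiVersion: "2025-11-01-preview" },
);
console.log(context.apiVersion);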
diff --git a/sdk/search/search-documents/src/searchIndex/index.ts b/sdk/search/search-documents/src/searchIndex/index.ts
new file mode 100644
index 000000000000..cb9a864c1562
--- /dev/null
+++ b/sdk/search/search-documents/src/searchIndex/index.ts
@@ -0,0 +1,37 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+export { SearchIndexClient } from "./searchIndexClient.js";
+export {
+  ListIndexStatsSummaryOptionalParams,
+  GetServiceStatisticsOptionalParams,
+  CreateKnowledgeSourceOptionalParams,
+  ListKnowledgeSourcesOptionalParams,
+  GetKnowledgeSourceOptionalParams,
+  DeleteKnowledgeSourceOptionalParams,
+  CreateOrUpdateKnowledgeSourceOptionalParams,
+  CreateKnowledgeBaseOptionalParams,
+  ListKnowledgeBasesOptionalParams,
+  GetKnowledgeBaseOptionalParams,
+  DeleteKnowledgeBaseOptionalParams,
+  CreateOrUpdateKnowledgeBaseOptionalParams,
+  CreateAliasOptionalParams,
+  ListAliasesOptionalParams,
+  GetAliasOptionalParams,
+  DeleteAliasOptionalParams,
+  CreateOrUpdateAliasOptionalParams,
+  AnalyzeTextOptionalParams,
+  GetIndexStatisticsOptionalParams,
+  CreateIndexOptionalParams,
+  ListIndexesOptionalParams,
+  GetIndexOptionalParams,
+  DeleteIndexOptionalParams,
+  CreateOrUpdateIndexOptionalParams,
+  CreateSynonymMapOptionalParams,
+  GetSynonymMapsOptionalParams,
+  GetSynonymMapOptionalParams,
+  DeleteSynonymMapOptionalParams,
+  CreateOrUpdateSynonymMapOptionalParams,
+  SearchIndexContext,
+  SearchIndexClientOptionalParams,
+} from "./api/index.js";
diff --git a/sdk/search/search-documents/src/searchIndex/searchIndexClient.ts b/sdk/search/search-documents/src/searchIndex/searchIndexClient.ts
new file mode 100644
index 000000000000..cf4ab92af502
--- /dev/null
+++ b/sdk/search/search-documents/src/searchIndex/searchIndexClient.ts
@@ -0,0 +1,343 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+import {
+  createSearchIndex,
+  SearchIndexContext,
+  SearchIndexClientOptionalParams,
+} from "./api/index.js";
+import {
+  SynonymMap,
+  ListSynonymMapsResult,
+  SearchIndex,
+  GetIndexStatisticsResult,
+  AnalyzeTextOptions,
+  AnalyzeResult,
+  SearchAlias,
+  KnowledgeBase,
+  KnowledgeSourceUnion,
+  SearchServiceStatistics,
+  IndexStatisticsSummary,
+} from "../models/azure/search/documents/indexes/models.js";
+import { PagedAsyncIterableIterator } from "../static-helpers/pagingHelpers.js";
+import {
+  listIndexStatsSummary,
+  getServiceStatistics,
+  createKnowledgeSource,
+  listKnowledgeSources,
+  getKnowledgeSource,
+  deleteKnowledgeSource,
+  createOrUpdateKnowledgeSource,
+  createKnowledgeBase,
+  listKnowledgeBases,
+  getKnowledgeBase,
+  deleteKnowledgeBase,
+  createOrUpdateKnowledgeBase,
+  createAlias,
+  listAliases,
+  getAlias,
+  deleteAlias,
+  createOrUpdateAlias,
+  analyzeText,
+  getIndexStatistics,
+  createIndex,
+  listIndexes,
+  getIndex,
+  deleteIndex,
+  createOrUpdateIndex,
+  createSynonymMap,
+  getSynonymMaps,
+  getSynonymMap,
+  deleteSynonymMap,
+  createOrUpdateSynonymMap,
+} from "./api/operations.js";
+import {
+  ListIndexStatsSummaryOptionalParams,
+  GetServiceStatisticsOptionalParams,
+  CreateKnowledgeSourceOptionalParams,
+  ListKnowledgeSourcesOptionalParams,
+  GetKnowledgeSourceOptionalParams,
+  DeleteKnowledgeSourceOptionalParams,
+  CreateOrUpdateKnowledgeSourceOptionalParams,
+  CreateKnowledgeBaseOptionalParams,
+  ListKnowledgeBasesOptionalParams,
+  GetKnowledgeBaseOptionalParams,
+  DeleteKnowledgeBaseOptionalParams,
+  CreateOrUpdateKnowledgeBaseOptionalParams,
+  CreateAliasOptionalParams,
+  ListAliasesOptionalParams,
+  GetAliasOptionalParams,
+  DeleteAliasOptionalParams,
+  CreateOrUpdateAliasOptionalParams,
+  AnalyzeTextOptionalParams,
+  GetIndexStatisticsOptionalParams,
+  CreateIndexOptionalParams,
+  ListIndexesOptionalParams,
+  GetIndexOptionalParams,
+  DeleteIndexOptionalParams,
+  CreateOrUpdateIndexOptionalParams,
+  CreateSynonymMapOptionalParams,
+  GetSynonymMapsOptionalParams,
+  GetSynonymMapOptionalParams,
+  DeleteSynonymMapOptionalParams,
+  CreateOrUpdateSynonymMapOptionalParams,
+} from "./api/options.js";
+import { KeyCredential, TokenCredential } from "@azure/core-auth";
+import { Pipeline } from "@azure/core-rest-pipeline";
+
+export { SearchIndexClientOptionalParams } from "./api/searchIndexContext.js";
+
+export class SearchIndexClient {
+  private _client: SearchIndexContext;
+  /** The pipeline used by this client to make requests */
+  public readonly pipeline: Pipeline;
+
+  constructor(
+    endpointParam: string,
+    credential: KeyCredential | TokenCredential,
+    options: SearchIndexClientOptionalParams = {},
+  ) {
+    const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix;
+    const userAgentPrefix = prefixFromOptions
+      ? `${prefixFromOptions} azsdk-js-client`
+      : `azsdk-js-client`;
+    this._client = createSearchIndex(endpointParam, credential, {
+      ...options,
+      userAgentOptions: { userAgentPrefix },
+    });
+    this.pipeline = this._client.pipeline;
+  }
+
+  /** Retrieves a summary of statistics for all indexes in the search service. */
+  listIndexStatsSummary(
+    options: ListIndexStatsSummaryOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<IndexStatisticsSummary> {
+    return listIndexStatsSummary(this._client, options);
+  }
+
+  /** Gets service level statistics for a search service. */
+  getServiceStatistics(
+    options: GetServiceStatisticsOptionalParams = { requestOptions: {} },
+  ): Promise<SearchServiceStatistics> {
+    return getServiceStatistics(this._client, options);
+  }
+
+  /** Creates a new knowledge source. */
+  createKnowledgeSource(
+    knowledgeSource: KnowledgeSourceUnion,
+    options: CreateKnowledgeSourceOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeSourceUnion> {
+    return createKnowledgeSource(this._client, knowledgeSource, options);
+  }
+
+  /** Lists all knowledge sources available for a search service. */
+  listKnowledgeSources(
+    options: ListKnowledgeSourcesOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<KnowledgeSourceUnion> {
+    return listKnowledgeSources(this._client, options);
+  }
+
+  /** Retrieves a knowledge source definition. */
+  getKnowledgeSource(
+    sourceName: string,
+    options: GetKnowledgeSourceOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeSourceUnion> {
+    return getKnowledgeSource(this._client, sourceName, options);
+  }
+
+  /** Deletes an existing knowledge source. */
+  deleteKnowledgeSource(
+    sourceName: string,
+    options: DeleteKnowledgeSourceOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteKnowledgeSource(this._client, sourceName, options);
+  }
+
+  /** Creates a new knowledge source or updates a knowledge source if it already exists. */
+  createOrUpdateKnowledgeSource(
+    knowledgeSource: KnowledgeSourceUnion,
+    sourceName: string,
+    options: CreateOrUpdateKnowledgeSourceOptionalParams = {
+      requestOptions: {},
+    },
+  ): Promise<KnowledgeSourceUnion> {
+    return createOrUpdateKnowledgeSource(this._client, knowledgeSource, sourceName, options);
+  }
+
+  /** Creates a new knowledge base. */
+  createKnowledgeBase(
+    knowledgeBase: KnowledgeBase,
+    options: CreateKnowledgeBaseOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeBase> {
+    return createKnowledgeBase(this._client, knowledgeBase, options);
+  }
+
+  /** Lists all knowledge bases available for a search service. */
+  listKnowledgeBases(
+    options: ListKnowledgeBasesOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<KnowledgeBase> {
+    return listKnowledgeBases(this._client, options);
+  }
+
+  /** Retrieves a knowledge base definition. */
+  getKnowledgeBase(
+    knowledgeBaseName: string,
+    options: GetKnowledgeBaseOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeBase> {
+    return getKnowledgeBase(this._client, knowledgeBaseName, options);
+  }
+
+  /** Deletes a knowledge base. */
+  deleteKnowledgeBase(
+    knowledgeBaseName: string,
+    options: DeleteKnowledgeBaseOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteKnowledgeBase(this._client, knowledgeBaseName, options);
+  }
+
+  /** Creates a new knowledge base or updates a knowledge base if it already exists. */
+  createOrUpdateKnowledgeBase(
+    knowledgeBase: KnowledgeBase,
+    knowledgeBaseName: string,
+    options: CreateOrUpdateKnowledgeBaseOptionalParams = { requestOptions: {} },
+  ): Promise<KnowledgeBase> {
+    return createOrUpdateKnowledgeBase(this._client, knowledgeBase, knowledgeBaseName, options);
+  }
+
+  /** Creates a new search alias. */
+  createAlias(
+    alias: SearchAlias,
+    options: CreateAliasOptionalParams = { requestOptions: {} },
+  ): Promise<SearchAlias> {
+    return createAlias(this._client, alias, options);
+  }
+
+  /** Lists all aliases available for a search service. */
+  listAliases(
+    options: ListAliasesOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<SearchAlias> {
+    return listAliases(this._client, options);
+  }
+
+  /** Retrieves an alias definition. */
+  getAlias(
+    aliasName: string,
+    options: GetAliasOptionalParams = { requestOptions: {} },
+  ): Promise<SearchAlias> {
+    return getAlias(this._client, aliasName, options);
+  }
+
+  /** Deletes a search alias and its associated mapping to an index. This operation is permanent, with no recovery option. The mapped index is untouched by this operation. */
+  deleteAlias(
+    aliasName: string,
+    options: DeleteAliasOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteAlias(this._client, aliasName, options);
+  }
+
+  /** Creates a new search alias or updates an alias if it already exists. */
+  createOrUpdateAlias(
+    alias: SearchAlias,
+    aliasName: string,
+    options: CreateOrUpdateAliasOptionalParams = { requestOptions: {} },
+  ): Promise<SearchAlias> {
+    return createOrUpdateAlias(this._client, alias, aliasName, options);
+  }
+
+  /** Shows how an analyzer breaks text into tokens. */
+  analyzeText(
+    request: AnalyzeTextOptions,
+    indexName: string,
+    options: AnalyzeTextOptionalParams = { requestOptions: {} },
+  ): Promise<AnalyzeResult> {
+    return analyzeText(this._client, request, indexName, options);
+  }
+
+  /** Returns statistics for the given index, including a document count and storage usage. */
+  getIndexStatistics(
+    indexName: string,
+    options: GetIndexStatisticsOptionalParams = { requestOptions: {} },
+  ): Promise<GetIndexStatisticsResult> {
+    return getIndexStatistics(this._client, indexName, options);
+  }
+
+  /** Creates a new search index. */
+  createIndex(
+    index: SearchIndex,
+    options: CreateIndexOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndex> {
+    return createIndex(this._client, index, options);
+  }
+
+  /** Lists all indexes available for a search service. */
+  listIndexes(
+    options: ListIndexesOptionalParams = { requestOptions: {} },
+  ): PagedAsyncIterableIterator<SearchIndex> {
+    return listIndexes(this._client, options);
+  }
+
+  /** Retrieves an index definition. */
+  getIndex(
+    indexName: string,
+    options: GetIndexOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndex> {
+    return getIndex(this._client, indexName, options);
+  }
+
+  /** Deletes a search index and all the documents it contains. This operation is permanent, with no recovery option. Make sure you have a master copy of your index definition, data ingestion code, and a backup of the primary data source in case you need to re-build the index. */
+  deleteIndex(
+    indexName: string,
+    options: DeleteIndexOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteIndex(this._client, indexName, options);
+  }
+
+  /** Creates a new search index or updates an index if it already exists. */
+  createOrUpdateIndex(
+    index: SearchIndex,
+    indexName: string,
+    options: CreateOrUpdateIndexOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndex> {
+    return createOrUpdateIndex(this._client, index, indexName, options);
+  }
+
+  /** Creates a new synonym map. */
+  createSynonymMap(
+    synonymMap: SynonymMap,
+    options: CreateSynonymMapOptionalParams = { requestOptions: {} },
+  ): Promise<SynonymMap> {
+    return createSynonymMap(this._client, synonymMap, options);
+  }
+
+  /** Lists all synonym maps available for a search service. */
+  getSynonymMaps(
+    options: GetSynonymMapsOptionalParams = { requestOptions: {} },
+  ): Promise<ListSynonymMapsResult> {
+    return getSynonymMaps(this._client, options);
+  }
+
+  /** Retrieves a synonym map definition. */
+  getSynonymMap(
+    synonymMapName: string,
+    options: GetSynonymMapOptionalParams = { requestOptions: {} },
+  ): Promise<SynonymMap> {
+    return getSynonymMap(this._client, synonymMapName, options);
+  }
+
+  /** Deletes a synonym map. */
+  deleteSynonymMap(
+    synonymMapName: string,
+    options: DeleteSynonymMapOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteSynonymMap(this._client, synonymMapName, options);
+  }
+
+  /** Creates a new synonym map or updates a synonym map if it already exists. */
+  createOrUpdateSynonymMap(
+    synonymMap: SynonymMap,
+    synonymMapName: string,
+    options: CreateOrUpdateSynonymMapOptionalParams = { requestOptions: {} },
+  ): Promise<SynonymMap> {
+    return createOrUpdateSynonymMap(this._client, synonymMap, synonymMapName, options);
+  }
+}
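And a usage sketch of the generated class as a whole (again illustrative and not part of the patch; endpoint and key are placeholders):

import { AzureKeyCredential } from "@azure/core-auth";
import { SearchIndexClient } from "./searchIndexClient.js";

const client = new SearchIndexClient(
  "https://<service-name>.search.windows.net",
  new AzureKeyCredential("<admin-key>"),
);

async function main(): Promise<void> {
  // listIndexes returns PagedAsyncIterableIterator<SearchIndex>, so plain
  // for-await iteration walks every page transparently.
  for await (const index of client.listIndexes()) {
    console.log(index.name);
  }
}

main().catch(console.error);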
diff --git a/sdk/search/search-documents/src/searchIndexClient.ts b/sdk/search/search-documents/src/searchIndexClient.ts
index 285f5d495500..81f359486e1c 100644
--- a/sdk/search/search-documents/src/searchIndexClient.ts
+++ b/sdk/search/search-documents/src/searchIndexClient.ts
@@ -5,16 +5,17 @@
 import type { KeyCredential, TokenCredential } from "@azure/core-auth";
 import { isTokenCredential } from "@azure/core-auth";
-import type { InternalClientPipelineOptions } from "@azure/core-client";
-import type { ExtendedCommonClientOptions } from "@azure/core-http-compat";
 import type { Pipeline } from "@azure/core-rest-pipeline";
-import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline";
+import {
+  bearerTokenAuthenticationPolicy,
+  bearerTokenAuthenticationPolicyName,
+} from "@azure/core-rest-pipeline";
-import type {
-  AnalyzeResult,
-  IndexStatisticsSummary,
-  KnowledgeSourceStatus,
-} from "./generated/service/models/index.js";
-import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient.js";
+import type {
+  AnalyzeResult,
+  // KnowledgeSourceStatus,
+} from "./models/azure/search/documents/indexes/index.js";
+import type { SearchIndexClientOptionalParams } from "./searchIndex/searchIndexClient.js";
+import { SearchIndexClient as GeneratedClient } from "./searchIndex/searchIndexClient.js";
 import type { KnowledgeBase } from "./knowledgeBaseModels.js";
 import type { KnowledgeRetrievalClientOptions as GetKnowledgeRetrievalClientOptions } from "./knowledgeRetrievalClient.js";
 import { KnowledgeRetrievalClient } from "./knowledgeRetrievalClient.js";
@@ -48,7 +49,6 @@ import type {
   GetIndexStatsSummaryOptions,
   GetKnowledgeBaseOptions,
   GetKnowledgeSourceOptions,
-  GetKnowledgeSourceStatusOptions,
   GetServiceStatisticsOptions,
   GetSynonymMapsOptions,
   IndexIterator,
@@ -69,12 +69,13 @@ import type {
   SynonymMap,
 } from "./serviceModels.js";
 import * as utils from "./serviceUtils.js";
-import { createSpan } from "./tracing.js";
+import { tracingClient } from "./tracing.js";
+import type { ClientOptions } from "@azure-rest/core-client";
 
 /**
  * Client options used to configure AI Search API requests.
  */
-export interface SearchIndexClientOptions extends ExtendedCommonClientOptions {
+export interface SearchIndexClientOptions extends ClientOptions {
   /**
    * The API version to use when communicating with the service.
    * @deprecated use {@link serviceVersion} instead
@@ -159,8 +160,13 @@
     this.credential = credential;
     this.options = options;
 
-    const internalClientPipelineOptions: InternalClientPipelineOptions = {
+    this.serviceVersion =
+      this.options.serviceVersion ?? this.options.apiVersion ?? utils.defaultServiceVersion;
+    this.apiVersion = this.serviceVersion;
+
+    const internalClientPipelineOptions: SearchIndexClientOptionalParams = {
       ...this.options,
+      apiVersion: this.serviceVersion,
       ...{
         loggingOptions: {
           logger: logger.info,
@@ -176,17 +182,12 @@
       },
     };
 
-    this.serviceVersion =
-      this.options.serviceVersion ?? this.options.apiVersion ?? utils.defaultServiceVersion;
-    this.apiVersion = this.serviceVersion;
-
-    this.client = new GeneratedClient(
-      this.endpoint,
-      this.serviceVersion,
-      internalClientPipelineOptions,
-    );
+    this.client = new GeneratedClient(this.endpoint, credential, internalClientPipelineOptions);
     this.pipeline = this.client.pipeline;
 
+    // TODO: consider leaving the policy in-place instead of removing and re-adding
+    this.pipeline.removePolicy({ name: bearerTokenAuthenticationPolicyName });
+
     if (isTokenCredential(credential)) {
       const scope: string = this.options.audience
         ? `${this.options.audience}/.default`
@@ -202,77 +203,15 @@
     this.client.pipeline.addPolicy(createOdataMetadataPolicy("minimal"));
   }
 
-  private async *listIndexesPage(
-    options: ListIndexesOptions = {},
-  ): AsyncIterableIterator<SearchIndex[]> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-listIndexesPage", options);
-    try {
-      const result = await this.client.indexes.list(updatedOptions);
-      const mapped = result.indexes.map(utils.generatedIndexToPublicIndex);
-      yield mapped;
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
-  }
-
-  private async *listIndexesAll(
-    options: ListIndexesOptions = {},
-  ): AsyncIterableIterator<SearchIndex> {
-    for await (const page of this.listIndexesPage(options)) {
-      yield* page;
-    }
-  }
-
   /**
    * Retrieves a list of existing indexes in the service.
    * @param options - Options to the list index operation.
    */
   public listIndexes(options: ListIndexesOptions = {}): IndexIterator {
-    const iter = this.listIndexesAll(options);
-
-    return {
-      next() {
-        return iter.next();
-      },
-      [Symbol.asyncIterator]() {
-        return this;
-      },
-      byPage: () => {
-        return this.listIndexesPage(options);
-      },
-    };
-  }
-
-  private async *listAliasesPage(
-    options: ListAliasesOptions = {},
-  ): AsyncIterableIterator<SearchIndexAlias[]> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-listAliases", options);
-    try {
-      const result = await this.client.aliases.list(updatedOptions);
-      yield result.aliases;
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
-  }
-
-  private async *listAliasesAll(
-    options: ListAliasesOptions = {},
-  ): AsyncIterableIterator<SearchIndexAlias> {
-    for await (const page of this.listAliasesPage(options)) {
-      yield* page;
-    }
+    return utils.mapPagedAsyncIterable(
+      this.client.listIndexes(options),
+      utils.generatedIndexToPublicIndex,
+    );
   }
 
   /**
@@ -280,49 +219,7 @@
    * @param options - The options parameters.
    */
   public listAliases(options: ListAliasesOptions = {}): AliasIterator {
-    const iter = this.listAliasesAll(options);
-
-    return {
-      next() {
-        return iter.next();
-      },
-      [Symbol.asyncIterator]() {
-        return this;
-      },
-      byPage: () => {
-        return this.listAliasesPage(options);
-      },
-    };
-  }
-
-  private async *listIndexesNamesPage(
-    options: ListIndexesOptions = {},
-  ): AsyncIterableIterator<string[]> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-listIndexesNamesPage", options);
-    try {
-      const result = await this.client.indexes.list({
-        ...updatedOptions,
-        select: "name",
-      });
-      const mapped = result.indexes.map((idx) => idx.name);
-      yield mapped;
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
-  }
-
-  private async *listIndexesNamesAll(
-    options: ListIndexesOptions = {},
-  ): AsyncIterableIterator<string> {
-    for await (const page of this.listIndexesNamesPage(options)) {
-      yield* page;
-    }
+    return this.client.listAliases(options);
   }
 
   /**
@@ -331,19 +228,10 @@
    */
   // eslint-disable-next-line @azure/azure-sdk/ts-naming-options
   public listIndexesNames(options: ListIndexesOptions = {}): IndexNameIterator {
-    const iter = this.listIndexesNamesAll(options);
-
-    return {
-      next() {
-        return iter.next();
-      },
-      [Symbol.asyncIterator]() {
-        return this;
-      },
-      byPage: () => {
-        return this.listIndexesNamesPage(options);
-      },
-    };
+    return utils.mapPagedAsyncIterable(
+      this.client.listIndexes({ ...options, select: "name" }),
+      (idx) => idx.name,
+    );
   }
 
   /**
@@ -351,19 +239,14 @@
   * @param options - Options to the list SynonymMaps operation.
    */
   public async listSynonymMaps(options: ListSynonymMapsOptions = {}): Promise<Array<SynonymMap>> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-listSynonymMaps", options);
-    try {
-      const result = await this.client.synonymMaps.list(updatedOptions);
-      return result.synonymMaps.map(utils.generatedSynonymMapToPublicSynonymMap);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexClient-listSynonymMaps",
+      options,
+      async (updatedOptions) => {
+        const result = await this.client.getSynonymMaps(updatedOptions);
+        return result.synonymMaps.map(utils.generatedSynonymMapToPublicSynonymMap);
+      },
+    );
   }
 
   /**
@@ -372,22 +255,17 @@
    */
   // eslint-disable-next-line @azure/azure-sdk/ts-naming-options
   public async listSynonymMapsNames(options: ListSynonymMapsOptions = {}): Promise<Array<string>> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-listSynonymMapsNames", options);
-    try {
-      const result = await this.client.synonymMaps.list({
-        ...updatedOptions,
-        select: "name",
-      });
-      return result.synonymMaps.map((sm) => sm.name);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexClient-listSynonymMapsNames",
+      options,
+      async (updatedOptions) => {
+        const result = await this.client.getSynonymMaps({
+          ...updatedOptions,
+          select: "name",
+        });
+        return result.synonymMaps.map((sm) => sm.name);
+      },
+    );
   }
 
   /**
@@ -396,19 +274,10 @@
    * @param options - Additional optional arguments.
    */
   public async getIndex(indexName: string, options: GetIndexOptions = {}): Promise<SearchIndex> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-getIndex", options);
-    try {
-      const result = await this.client.indexes.get(indexName, updatedOptions);
+    return tracingClient.withSpan("SearchIndexClient-getIndex", options, async (updatedOptions) => {
+      const result = await this.client.getIndex(indexName, updatedOptions);
       return utils.generatedIndexToPublicIndex(result);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    });
   }
 
   /**
@@ -421,19 +290,14 @@
     // eslint-disable-next-line @azure/azure-sdk/ts-naming-options
     options: GetSynonymMapsOptions = {},
   ): Promise<SynonymMap> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-getSynonymMaps", options);
-    try {
-      const result = await this.client.synonymMaps.get(synonymMapName, updatedOptions);
-      return utils.generatedSynonymMapToPublicSynonymMap(result);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexClient-getSynonymMap",
+      options,
+      async (updatedOptions) => {
+        const result = await this.client.getSynonymMap(synonymMapName, updatedOptions);
+        return utils.generatedSynonymMapToPublicSynonymMap(result);
+      },
+    );
   }
 
   /**
@@ -445,22 +309,17 @@
     index: SearchIndex,
     options: CreateIndexOptions = {},
   ): Promise<SearchIndex> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-createIndex", options);
-    try {
-      const result = await this.client.indexes.create(
-        utils.publicIndexToGeneratedIndex(index),
-        updatedOptions,
-      );
-      return utils.generatedIndexToPublicIndex(result);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexClient-createIndex",
+      options,
+      async (updatedOptions) => {
+        const result = await this.client.createIndex(
+          utils.publicIndexToGeneratedIndex(index),
+          updatedOptions,
+        );
+        return utils.generatedIndexToPublicIndex(result);
+      },
+    );
   }
 
   /**
@@ -472,22 +331,17 @@
     synonymMap: SynonymMap,
     options: CreateSynonymMapOptions = {},
   ): Promise<SynonymMap> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-createSynonymMaps", options);
-    try {
-      const result = await this.client.synonymMaps.create(
-        utils.publicSynonymMapToGeneratedSynonymMap(synonymMap),
-        updatedOptions,
-      );
-      return utils.generatedSynonymMapToPublicSynonymMap(result);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexClient-createSynonymMaps",
+      options,
+      async (updatedOptions) => {
+        const result = await this.client.createSynonymMap(
+          utils.publicSynonymMapToGeneratedSynonymMap(synonymMap),
+          updatedOptions,
+        );
+        return utils.generatedSynonymMapToPublicSynonymMap(result);
+      },
+    );
   }
 
   /**
@@ -499,28 +353,19 @@
     index: SearchIndex,
     options: CreateOrUpdateIndexOptions = {},
   ): Promise<SearchIndex> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-createOrUpdateIndex", options);
-    try {
-      const etag = options.onlyIfUnchanged ? index.etag : undefined;
-
-      const result = await this.client.indexes.createOrUpdate(
-        index.name,
-        utils.publicIndexToGeneratedIndex(index),
-        {
-          ...updatedOptions,
-          ifMatch: etag,
-        },
-      );
-      return utils.generatedIndexToPublicIndex(result);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexClient-createOrUpdateIndex",
+      options,
+      async (updatedOptions) => {
+        const etag = options.onlyIfUnchanged ? index.etag : undefined;
+        const result = await this.client.createOrUpdateIndex(
+          utils.publicIndexToGeneratedIndex(index),
+          index.name,
+          { ...updatedOptions, ifMatch: etag },
+        );
+        return utils.generatedIndexToPublicIndex(result);
+      },
+    );
   }
 
   /**
@@ -532,31 +377,23 @@
     synonymMap: SynonymMap,
     options: CreateOrUpdateSynonymMapOptions = {},
   ): Promise<SynonymMap> {
-    const { span, updatedOptions } = createSpan(
+    return tracingClient.withSpan(
       "SearchIndexClient-createOrUpdateSynonymMap",
       options,
+      async (updatedOptions) => {
+        const etag = options.onlyIfUnchanged ? synonymMap.etag : undefined;
+
+        const result = await this.client.createOrUpdateSynonymMap(
+          utils.publicSynonymMapToGeneratedSynonymMap(synonymMap),
+          synonymMap.name,
+          {
+            ...updatedOptions,
+            ifMatch: etag,
+          },
+        );
+        return utils.generatedSynonymMapToPublicSynonymMap(result);
+      },
     );
-    try {
-      const etag = options.onlyIfUnchanged ? synonymMap.etag : undefined;
-
-      const result = await this.client.synonymMaps.createOrUpdate(
-        synonymMap.name,
-        utils.publicSynonymMapToGeneratedSynonymMap(synonymMap),
-        {
-          ...updatedOptions,
-          ifMatch: etag,
-        },
-      );
-      return utils.generatedSynonymMapToPublicSynonymMap(result);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
   }
 
   /**
@@ -576,25 +413,20 @@
     index: string | SearchIndex,
     options: DeleteIndexOptions = {},
   ): Promise<void> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-deleteIndex", options);
-    try {
-      const indexName: string = typeof index === "string" ? index : index.name;
-      const etag =
-        typeof index === "string" ? undefined : options.onlyIfUnchanged ? index.etag : undefined;
+    return tracingClient.withSpan(
+      "SearchIndexClient-deleteIndex",
+      options,
+      async (updatedOptions) => {
+        const indexName: string = typeof index === "string" ? index : index.name;
+        const etag =
+          typeof index === "string" ? undefined : options.onlyIfUnchanged ? index.etag : undefined;
 
-      await this.client.indexes.delete(indexName, {
-        ...updatedOptions,
-        ifMatch: etag,
-      });
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+        await this.client.deleteIndex(indexName, {
+          ...updatedOptions,
+          ifMatch: etag,
+        });
+      },
+    );
   }
 
   /**
@@ -606,29 +438,25 @@
     synonymMap: string | SynonymMap,
     options: DeleteSynonymMapOptions = {},
   ): Promise<void> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-deleteSynonymMap", options);
-    try {
-      const synonymMapName: string = typeof synonymMap === "string" ? synonymMap : synonymMap.name;
-      const etag =
-        typeof synonymMap === "string"
-          ? undefined
-          : options.onlyIfUnchanged
-            ? synonymMap.etag
-            : undefined;
-
-      await this.client.synonymMaps.delete(synonymMapName, {
-        ...updatedOptions,
-        ifMatch: etag,
-      });
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexClient-deleteSynonymMap",
+      options,
+      async (updatedOptions) => {
+        const synonymMapName: string =
+          typeof synonymMap === "string" ? synonymMap : synonymMap.name;
+        const etag =
+          typeof synonymMap === "string"
+            ? undefined
+            : options.onlyIfUnchanged
+              ? synonymMap.etag
+              : undefined;
+
+        await this.client.deleteSynonymMap(synonymMapName, {
+          ...updatedOptions,
+          ifMatch: etag,
+        });
+      },
+    );
   }
 
   /**
@@ -640,24 +468,17 @@
     alias: SearchIndexAlias,
     options: CreateOrUpdateAliasOptions = {},
   ): Promise<SearchIndexAlias> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-createOrUpdateAlias", options);
-    try {
-      const etag = options.onlyIfUnchanged ? alias.etag : undefined;
-
-      const result = await this.client.aliases.createOrUpdate(alias.name, alias, {
-        ...updatedOptions,
-        ifMatch: etag,
-      });
-      return result;
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexClient-createOrUpdateAlias",
+      options,
+      async (updatedOptions) => {
+        const etag = options.onlyIfUnchanged ?
alias.eTag : undefined; + return this.client.createOrUpdateAlias(alias, alias.name, { + ...updatedOptions, + ifMatch: etag, + }); + }, + ); } /** @@ -669,19 +490,13 @@ export class SearchIndexClient { alias: SearchIndexAlias, options: CreateAliasOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-createAlias", options); - try { - const result = await this.client.aliases.create(alias, updatedOptions); - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexClient-createAlias", + options, + async (updatedOptions) => { + return this.client.createAlias(alias, updatedOptions); + }, + ); } /** @@ -703,25 +518,20 @@ export class SearchIndexClient { alias: string | SearchIndexAlias, options: DeleteAliasOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-deleteAlias", options); - try { - const aliasName: string = typeof alias === "string" ? alias : alias.name; - const etag = - typeof alias === "string" ? undefined : options.onlyIfUnchanged ? alias.etag : undefined; + return tracingClient.withSpan( + "SearchIndexClient-deleteAlias", + options, + async (updatedOptions) => { + const aliasName: string = typeof alias === "string" ? alias : alias.name; + const etag = + typeof alias === "string" ? undefined : options.onlyIfUnchanged ? alias.eTag : undefined; - await this.client.aliases.delete(aliasName, { - ...updatedOptions, - ifMatch: etag, - }); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + await this.client.deleteAlias(aliasName, { + ...updatedOptions, + ifMatch: etag, + }); + }, + ); } /** @@ -733,19 +543,9 @@ export class SearchIndexClient { aliasName: string, options: GetAliasOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-getAlias", options); - try { - const result = await this.client.aliases.get(aliasName, updatedOptions); - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan("SearchIndexClient-getAlias", options, async (updatedOptions) => { + return this.client.getAlias(aliasName, updatedOptions); + }); } /** @@ -758,19 +558,13 @@ export class SearchIndexClient { indexName: string, options: GetIndexStatisticsOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-getIndexStatistics", options); - try { - const result = await this.client.indexes.getStatistics(indexName, updatedOptions); - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexClient-getIndexStatistics", + options, + async (updatedOptions) => { + return this.client.getIndexStatistics(indexName, updatedOptions); + }, + ); } /** @@ -780,14 +574,7 @@ export class SearchIndexClient { * @param options - Additional arguments */ public async analyzeText(indexName: string, options: AnalyzeTextOptions): Promise { - const { - abortSignal, - requestOptions, - tracingOptions, - analyzerName: analyzer, - tokenizerName: tokenizer, - ...restOptions - } = options; + const { abortSignal, requestOptions, tracingOptions, ...restOptions } = options; const operationOptions = { abortSignal, @@ -795,24 +582,13 @@ export class 
SearchIndexClient { tracingOptions, }; - const { span, updatedOptions } = createSpan("SearchIndexClient-analyzeText", operationOptions); - - try { - const result = await this.client.indexes.analyze( - indexName, - { ...restOptions, analyzer, tokenizer }, - updatedOptions, - ); - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexClient-analyzeText", + operationOptions, + async (updatedOptions) => { + return this.client.analyzeText({ ...restOptions }, indexName, updatedOptions); + }, + ); } /** @@ -822,48 +598,13 @@ export class SearchIndexClient { public async getServiceStatistics( options: GetServiceStatisticsOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-getServiceStatistics", options); - try { - const result = await this.client.getServiceStatistics(updatedOptions); - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } - } - - private async *getIndexStatsSummaryPage( - options: GetIndexStatsSummaryOptions = {}, - ): AsyncIterableIterator { - const { span, updatedOptions } = createSpan( - "SearchIndexClient-getIndexStatsSummaryPage", + return tracingClient.withSpan( + "SearchIndexClient-getServiceStatistics", options, + async (updatedOptions) => { + return this.client.getServiceStatistics(updatedOptions); + }, ); - try { - const { indexesStatistics } = await this.client.getIndexStatsSummary(updatedOptions); - yield indexesStatistics; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } - } - - private async *getIndexStatsSummaryAll( - options: GetIndexStatsSummaryOptions = {}, - ): AsyncIterableIterator { - for await (const page of this.getIndexStatsSummaryPage(options)) { - yield* page; - } } /** @@ -873,19 +614,7 @@ export class SearchIndexClient { public getIndexStatsSummary( options: GetIndexStatsSummaryOptions = {}, ): IndexStatisticsSummaryIterator { - const iter = this.getIndexStatsSummaryAll(options); - - return { - next() { - return iter.next(); - }, - [Symbol.asyncIterator]() { - return this; - }, - byPage: () => { - return this.getIndexStatsSummaryPage(options); - }, - }; + return this.client.listIndexStatsSummary(options); } /** @@ -895,24 +624,19 @@ export class SearchIndexClient { */ public async createKnowledgeBase( knowledgeBase: KnowledgeBase, - options?: CreateKnowledgeBaseOptions, + options: CreateKnowledgeBaseOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-createKnowledgeBase", options); - try { - const result = await this.client.knowledgeBases.create( - utils.convertKnowledgeBaseToGenerated(knowledgeBase)!, - updatedOptions, - ); - return utils.convertKnowledgeBaseToPublic(result)!; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexClient-createKnowledgeBase", + options, + async (updatedOptions) => { + const result = await this.client.createKnowledgeBase( + utils.convertKnowledgeBaseToGenerated(knowledgeBase)!, + updatedOptions, + ); + return utils.convertKnowledgeBaseToPublic(result)!; + }, + ); } /** @@ -924,33 +648,24 @@ export class SearchIndexClient { public async createOrUpdateKnowledgeBase( knowledgeBaseName: string, knowledgeBase: KnowledgeBase, - 
options?: CreateOrUpdateKnowledgeBaseOptions, + options: CreateOrUpdateKnowledgeBaseOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan( + return tracingClient.withSpan( "SearchIndexClient-createOrUpdateKnowledgeBase", options, + async (updatedOptions) => { + const etag = options.onlyIfUnchanged ? knowledgeBase.etag : undefined; + const result = await this.client.createOrUpdateKnowledgeBase( + utils.convertKnowledgeBaseToGenerated(knowledgeBase)!, + knowledgeBaseName, + { + ...updatedOptions, + ifMatch: etag, + }, + ); + return utils.convertKnowledgeBaseToPublic(result)!; + }, ); - try { - const etag = updatedOptions.onlyIfUnchanged ? knowledgeBase.etag : undefined; - - const result = await this.client.knowledgeBases.createOrUpdate( - knowledgeBaseName, - utils.convertKnowledgeBaseToGenerated(knowledgeBase)!, - { - ...updatedOptions, - ifMatch: etag, - }, - ); - return utils.convertKnowledgeBaseToPublic(result)!; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } } /** @@ -960,51 +675,16 @@ export class SearchIndexClient { */ public async getKnowledgeBase( knowledgeBaseName: string, - options?: GetKnowledgeBaseOptions, + options: GetKnowledgeBaseOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-getKnowledgeBase", options); - try { - const result = await this.client.knowledgeBases.get(knowledgeBaseName, updatedOptions); - return utils.convertKnowledgeBaseToPublic(result)!; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } - } - - private async *listKnowledgeBasesPage( - options: ListKnowledgeBasesOptions = {}, - ): AsyncIterableIterator { - const { span, updatedOptions } = createSpan( - "SearchIndexClient-listKnowledgeBasesPage", + return tracingClient.withSpan( + "SearchIndexClient-getKnowledgeBase", options, + async (updatedOptions) => { + const result = await this.client.getKnowledgeBase(knowledgeBaseName, updatedOptions); + return utils.convertKnowledgeBaseToPublic(result)!; + }, ); - try { - const { knowledgeBases } = await this.client.knowledgeBases.list(updatedOptions); - const mapped = knowledgeBases.map((base) => utils.convertKnowledgeBaseToPublic(base)!); - yield mapped; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } - } - - private async *listKnowledgeBasesAll( - options: ListKnowledgeBasesOptions = {}, - ): AsyncIterableIterator { - for await (const page of this.listKnowledgeBasesPage(options)) { - yield* page; - } } /** @@ -1012,19 +692,10 @@ export class SearchIndexClient { * @param options - Options to the list knowledge bases operation. 
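   *
   * @example A minimal iteration sketch, where `client` is a placeholder
   * `SearchIndexClient` (illustrative only, not part of this change):
   * ```ts
   * for await (const knowledgeBase of client.listKnowledgeBases()) {
   *   console.log(knowledgeBase.name);
   * }
   * ```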
*/ public listKnowledgeBases(options: ListKnowledgeBasesOptions = {}): KnowledgeBaseIterator { - const iter = this.listKnowledgeBasesAll(options); - - return { - next() { - return iter.next(); - }, - [Symbol.asyncIterator]() { - return this; - }, - byPage: () => { - return this.listKnowledgeBasesPage(options); - }, - }; + return utils.mapPagedAsyncIterable( + this.client.listKnowledgeBases(options), + utils.convertKnowledgeBaseToPublic, + ); } /** @@ -1047,63 +718,49 @@ export class SearchIndexClient { ): Promise; public async deleteKnowledgeBase( knowledgeBase: string | KnowledgeBase, - options?: DeleteKnowledgeBaseOptions, + options: DeleteKnowledgeBaseOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-deleteKnowledgeBase", options); - try { - const knowledgeBaseName = - typeof knowledgeBase === "string" ? knowledgeBase : knowledgeBase.name; - const etag = - typeof knowledgeBase !== "string" && updatedOptions.onlyIfUnchanged - ? knowledgeBase.etag - : undefined; + return tracingClient.withSpan( + "SearchIndexClient-deleteKnowledgeBase", + options, + async (updatedOptions) => { + const knowledgeBaseName = + typeof knowledgeBase === "string" ? knowledgeBase : knowledgeBase.name; + const etag = + typeof knowledgeBase !== "string" && options.onlyIfUnchanged + ? knowledgeBase.etag + : undefined; - const result = await this.client.knowledgeBases.delete(knowledgeBaseName, { - ...updatedOptions, - ifMatch: etag, - }); - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + const result = await this.client.deleteKnowledgeBase(knowledgeBaseName, { + ...updatedOptions, + ifMatch: etag, + }); + return result; + }, + ); } public async createOrUpdateKnowledgeSource( sourceName: string, knowledgeSource: KnowledgeSource, - options?: CreateOrUpdateKnowledgeSourceOptions, + options: CreateOrUpdateKnowledgeSourceOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan( + return tracingClient.withSpan( "SearchIndexClient-createOrUpdateKnowledgeSource", options, + async (updatedOptions) => { + const etag = options.onlyIfUnchanged ? knowledgeSource.etag : undefined; + const result = await this.client.createOrUpdateKnowledgeSource( + utils.convertKnowledgeSourceToGenerated(knowledgeSource)!, + sourceName, + { + ...updatedOptions, + ifMatch: etag, + }, + ); + return utils.convertKnowledgeSourceToPublic(result)!; + }, ); - try { - const etag = updatedOptions.onlyIfUnchanged ? knowledgeSource.etag : undefined; - - const result = await this.client.knowledgeSources.createOrUpdate( - sourceName, - utils.convertKnowledgeSourceToGenerated(knowledgeSource)!, - { - ...updatedOptions, - ifMatch: etag, - }, - ); - return utils.convertKnowledgeSourceToPublic(result)!; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } } /** @@ -1126,28 +783,19 @@ export class SearchIndexClient { ): Promise; public async deleteKnowledgeSource( source: string | KnowledgeSource, - options?: DeleteKnowledgeSourceOptions, + options: DeleteKnowledgeSourceOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-deleteKnowledgeSource", options); - try { - const sourceName = typeof source === "string" ? source : source.name; - const etag = - typeof source !== "string" && updatedOptions.onlyIfUnchanged ? 
source.etag : undefined; + return tracingClient.withSpan( + "SearchIndexClient-deleteKnowledgeSource", + options, + async (updatedOptions) => { + const sourceName = typeof source === "string" ? source : source.name; + const etag = + typeof source !== "string" && options.onlyIfUnchanged ? source.etag : undefined; - const result = await this.client.knowledgeSources.delete(sourceName, { - ...updatedOptions, - ifMatch: etag, - }); - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return this.client.deleteKnowledgeSource(sourceName, { ...updatedOptions, ifMatch: etag }); + }, + ); } /** @@ -1157,73 +805,26 @@ export class SearchIndexClient { */ public async getKnowledgeSource( sourceName: string, - options?: GetKnowledgeSourceOptions, + options: GetKnowledgeSourceOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-getKnowledgeSource", options); - try { - const result = await this.client.knowledgeSources.get(sourceName, updatedOptions); - return utils.convertKnowledgeSourceToPublic(result)!; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } - } - - private async *listKnowledgeSourcesPage( - options: ListKnowledgeSourcesOptions = {}, - ): AsyncIterableIterator { - const { span, updatedOptions } = createSpan( - "SearchIndexClient-listKnowledgeSourcesPage", + return tracingClient.withSpan( + "SearchIndexClient-getKnowledgeSource", options, + async (updatedOptions) => { + const result = await this.client.getKnowledgeSource(sourceName, updatedOptions); + return utils.convertKnowledgeSourceToPublic(result)!; + }, ); - try { - const { knowledgeSources } = await this.client.knowledgeSources.list(updatedOptions); - const mapped = knowledgeSources.map( - (source) => utils.convertKnowledgeSourceToPublic(source)!, - ); - yield mapped; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } - } - - private async *listKnowledgeSourcesAll( - options: ListKnowledgeSourcesOptions = {}, - ): AsyncIterableIterator { - for await (const page of this.listKnowledgeSourcesPage(options)) { - yield* page; - } } - /** * Retrieves a list of existing KnowledgeSources in the service. * @param options - Options to the list knowledge sources operation. 
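   *
   * @example A minimal paging sketch, assuming the mapped iterator preserves
   * `byPage`; `client` is a placeholder (illustrative only, not part of this change):
   * ```ts
   * for await (const page of client.listKnowledgeSources().byPage()) {
   *   for (const knowledgeSource of page) {
   *     console.log(knowledgeSource.name);
   *   }
   * }
   * ```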
*/ public listKnowledgeSources(options: ListKnowledgeSourcesOptions = {}): KnowledgeSourceIterator { - const iter = this.listKnowledgeSourcesAll(options); - - return { - next() { - return iter.next(); - }, - [Symbol.asyncIterator]() { - return this; - }, - byPage: () => { - return this.listKnowledgeSourcesPage(options); - }, - }; + return utils.mapPagedAsyncIterable( + this.client.listKnowledgeSources(options), + (ks) => utils.convertKnowledgeSourceToPublic(ks)!, + ); } /** @@ -1233,53 +834,40 @@ export class SearchIndexClient { */ public async createKnowledgeSource( knowledgeSource: KnowledgeSource, - options?: CreateKnowledgeSourceOptions, + options: CreateKnowledgeSourceOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexClient-createKnowledgeSource", options); - try { - const result = await this.client.knowledgeSources.create( - utils.convertKnowledgeSourceToGenerated(knowledgeSource)!, - updatedOptions, - ); - return utils.convertKnowledgeSourceToPublic(result)!; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } - } - - /** - * Returns the current status and synchronization history of a knowledge source. - * @param sourceName - The name of the knowledge source for which to retrieve status. - * @param options - The options parameters. - */ - public async getKnowledgeSourceStatus( - sourceName: string, - options?: GetKnowledgeSourceStatusOptions, - ): Promise { - const { span, updatedOptions } = createSpan( - "SearchIndexClient-getKnowledgeSourceStatus", + return tracingClient.withSpan( + "SearchIndexClient-createKnowledgeSource", options, + async (updatedOptions) => { + const result = await this.client.createKnowledgeSource( + utils.convertKnowledgeSourceToGenerated(knowledgeSource)!, + updatedOptions, + ); + return utils.convertKnowledgeSourceToPublic(result)!; + }, ); - try { - const result = await this.client.knowledgeSources.getStatus(sourceName, updatedOptions); - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } } + // /** + // * Returns the current status and synchronization history of a knowledge source. + // * @param sourceName - The name of the knowledge source for which to retrieve status. + // * @param options - The options parameters. + // */ + // public async getKnowledgeSourceStatus( + // sourceName: string, + // options: GetKnowledgeSourceStatusOptions = {}, + // ): Promise { + // return tracingClient.withSpan( + // "SearchIndexClient-getKnowledgeSourceStatus", + // options, + // async (updatedOptions) => { + // const result = await this.client.knowledgeSources.getStatus(sourceName, updatedOptions); + // return result; + // }, + // ); + // } + /** * Retrieves the SearchClient corresponding to this SearchIndexClient * @param indexName - Name of the index diff --git a/sdk/search/search-documents/src/searchIndexer/api/index.ts b/sdk/search/search-documents/src/searchIndexer/api/index.ts new file mode 100644 index 000000000000..66ccb1240c61 --- /dev/null +++ b/sdk/search/search-documents/src/searchIndexer/api/index.ts @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
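+
+// A hypothetical composition sketch for this barrel module; the endpoint,
+// credential, and the exact createSearchIndexer signature are assumptions,
+// not part of this change:
+//
+//   import { createSearchIndexer, getIndexers } from "./index.js";
+//
+//   const context = createSearchIndexer("<endpoint>", credential); // placeholder args
+//   const { indexers } = await getIndexers(context);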
+ +export { + resetSkills, + createSkillset, + getSkillsets, + getSkillset, + deleteSkillset, + createOrUpdateSkillset, + getIndexerStatus, + createIndexer, + getIndexers, + getIndexer, + deleteIndexer, + createOrUpdateIndexer, + runIndexer, + resetDocuments, + resync, + resetIndexer, + createDataSourceConnection, + getDataSourceConnections, + getDataSourceConnection, + deleteDataSourceConnection, + createOrUpdateDataSourceConnection, +} from "./operations.js"; +export { + ResetSkillsOptionalParams, + CreateSkillsetOptionalParams, + GetSkillsetsOptionalParams, + GetSkillsetOptionalParams, + DeleteSkillsetOptionalParams, + CreateOrUpdateSkillsetOptionalParams, + GetIndexerStatusOptionalParams, + CreateIndexerOptionalParams, + GetIndexersOptionalParams, + GetIndexerOptionalParams, + DeleteIndexerOptionalParams, + CreateOrUpdateIndexerOptionalParams, + RunIndexerOptionalParams, + ResetDocumentsOptionalParams, + ResyncOptionalParams, + ResetIndexerOptionalParams, + CreateDataSourceConnectionOptionalParams, + GetDataSourceConnectionsOptionalParams, + GetDataSourceConnectionOptionalParams, + DeleteDataSourceConnectionOptionalParams, + CreateOrUpdateDataSourceConnectionOptionalParams, +} from "./options.js"; +export { + createSearchIndexer, + SearchIndexerContext, + SearchIndexerClientOptionalParams, +} from "./searchIndexerContext.js"; diff --git a/sdk/search/search-documents/src/searchIndexer/api/operations.ts b/sdk/search/search-documents/src/searchIndexer/api/operations.ts new file mode 100644 index 000000000000..ef684553e558 --- /dev/null +++ b/sdk/search/search-documents/src/searchIndexer/api/operations.ts @@ -0,0 +1,1132 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { SearchIndexerContext as Client } from "./index.js"; +import { + SearchIndexerDataSourceConnection, + searchIndexerDataSourceConnectionSerializer, + searchIndexerDataSourceConnectionDeserializer, + ListDataSourcesResult, + listDataSourcesResultDeserializer, + documentKeysOrIdsSerializer, + SearchIndexer, + searchIndexerSerializer, + searchIndexerDeserializer, + ListIndexersResult, + listIndexersResultDeserializer, + SearchIndexerStatus, + searchIndexerStatusDeserializer, + SearchIndexerSkillset, + searchIndexerSkillsetSerializer, + searchIndexerSkillsetDeserializer, + ListSkillsetsResult, + listSkillsetsResultDeserializer, + SkillNames, + skillNamesSerializer, +} from "../../models/azure/search/documents/indexes/models.js"; +import { errorResponseDeserializer } from "../../models/azure/search/documents/models.js"; +import { expandUrlTemplate } from "../../static-helpers/urlTemplate.js"; +import { + ResetSkillsOptionalParams, + CreateSkillsetOptionalParams, + GetSkillsetsOptionalParams, + GetSkillsetOptionalParams, + DeleteSkillsetOptionalParams, + CreateOrUpdateSkillsetOptionalParams, + GetIndexerStatusOptionalParams, + CreateIndexerOptionalParams, + GetIndexersOptionalParams, + GetIndexerOptionalParams, + DeleteIndexerOptionalParams, + CreateOrUpdateIndexerOptionalParams, + RunIndexerOptionalParams, + ResetDocumentsOptionalParams, + ResyncOptionalParams, + ResetIndexerOptionalParams, + CreateDataSourceConnectionOptionalParams, + GetDataSourceConnectionsOptionalParams, + GetDataSourceConnectionOptionalParams, + DeleteDataSourceConnectionOptionalParams, + CreateOrUpdateDataSourceConnectionOptionalParams, +} from "./options.js"; +import { + StreamableMethod, + PathUncheckedResponse, + createRestError, + operationOptionsToRequestParameters, +} from "@azure-rest/core-client"; + +export 
function _resetSkillsSend( + context: Client, + skillNames: SkillNames, + skillsetName: string, + options: ResetSkillsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets('{skillsetName}')/search.resetskills{?api%2Dversion}", + { + skillsetName: skillsetName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + body: skillNamesSerializer(skillNames), + }); +} + +export async function _resetSkillsDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Reset an existing skillset in a search service. */ +export async function resetSkills( + context: Client, + skillNames: SkillNames, + skillsetName: string, + options: ResetSkillsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _resetSkillsSend(context, skillNames, skillsetName, options); + return _resetSkillsDeserialize(result); +} + +export function _createSkillsetSend( + context: Client, + skillset: SearchIndexerSkillset, + options: CreateSkillsetOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerSkillsetSerializer(skillset), + }); +} + +export async function _createSkillsetDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerSkillsetDeserializer(result.body); +} + +/** Creates a new skillset in a search service. */ +export async function createSkillset( + context: Client, + skillset: SearchIndexerSkillset, + options: CreateSkillsetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createSkillsetSend(context, skillset, options); + return _createSkillsetDeserialize(result); +} + +export function _getSkillsetsSend( + context: Client, + options: GetSkillsetsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getSkillsetsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return listSkillsetsResultDeserializer(result.body); +} + +/** List all skillsets in a search service. */ +export async function getSkillsets( + context: Client, + options: GetSkillsetsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getSkillsetsSend(context, options); + return _getSkillsetsDeserialize(result); +} + +export function _getSkillsetSend( + context: Client, + skillsetName: string, + options: GetSkillsetOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets('{skillsetName}'){?api%2Dversion}", + { + skillsetName: skillsetName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getSkillsetDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerSkillsetDeserializer(result.body); +} + +/** Retrieves a skillset in a search service. */ +export async function getSkillset( + context: Client, + skillsetName: string, + options: GetSkillsetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getSkillsetSend(context, skillsetName, options); + return _getSkillsetDeserialize(result); +} + +export function _deleteSkillsetSend( + context: Client, + skillsetName: string, + options: DeleteSkillsetOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets('{skillsetName}'){?api%2Dversion}", + { + skillsetName: skillsetName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteSkillsetDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a skillset in a search service. 
*/ +export async function deleteSkillset( + context: Client, + skillsetName: string, + options: DeleteSkillsetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteSkillsetSend(context, skillsetName, options); + return _deleteSkillsetDeserialize(result); +} + +export function _createOrUpdateSkillsetSend( + context: Client, + skillset: SearchIndexerSkillset, + skillsetName: string, + options: CreateOrUpdateSkillsetOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/skillsets('{skillsetName}'){?api%2Dversion,ignoreResetRequirements,disableCacheReprocessingChangeDetection}", + { + skillsetName: skillsetName, + "api%2Dversion": context.apiVersion, + ignoreResetRequirements: options?.skipIndexerResetRequirementForCache, + disableCacheReprocessingChangeDetection: options?.disableCacheReprocessingChangeDetection, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerSkillsetSerializer(skillset), + }); +} + +export async function _createOrUpdateSkillsetDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerSkillsetDeserializer(result.body); +} + +/** Creates a new skillset in a search service or updates the skillset if it already exists. */ +export async function createOrUpdateSkillset( + context: Client, + skillset: SearchIndexerSkillset, + skillsetName: string, + options: CreateOrUpdateSkillsetOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateSkillsetSend(context, skillset, skillsetName, options); + return _createOrUpdateSkillsetDeserialize(result); +} + +export function _getIndexerStatusSend( + context: Client, + indexerName: string, + options: GetIndexerStatusOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}')/search.status{?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexerStatusDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerStatusDeserializer(result.body); +} + +/** Returns the current status and execution history of an indexer. */ +export async function getIndexerStatus( + context: Client, + indexerName: string, + options: GetIndexerStatusOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexerStatusSend(context, indexerName, options); + return _getIndexerStatusDeserialize(result); +} + +export function _createIndexerSend( + context: Client, + indexer: SearchIndexer, + options: CreateIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerSerializer(indexer), + }); +} + +export async function _createIndexerDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDeserializer(result.body); +} + +/** Creates a new indexer. */ +export async function createIndexer( + context: Client, + indexer: SearchIndexer, + options: CreateIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createIndexerSend(context, indexer, options); + return _createIndexerDeserialize(result); +} + +export function _getIndexersSend( + context: Client, + options: GetIndexersOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexersDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return listIndexersResultDeserializer(result.body); +} + +/** Lists all indexers available for a search service. 
*/ +export async function getIndexers( + context: Client, + options: GetIndexersOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexersSend(context, options); + return _getIndexersDeserialize(result); +} + +export function _getIndexerSend( + context: Client, + indexerName: string, + options: GetIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}'){?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getIndexerDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDeserializer(result.body); +} + +/** Retrieves an indexer definition. */ +export async function getIndexer( + context: Client, + indexerName: string, + options: GetIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getIndexerSend(context, indexerName, options); + return _getIndexerDeserialize(result); +} + +export function _deleteIndexerSend( + context: Client, + indexerName: string, + options: DeleteIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}'){?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteIndexerDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes an indexer. 
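+ *
+ * @example Hypothetical usage; the name and `previousEtag` are placeholders
+ * (illustrative only, not part of this change):
+ * ```ts
+ * await deleteIndexer(context, "my-indexer", { ifMatch: previousEtag });
+ * ```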
*/ +export async function deleteIndexer( + context: Client, + indexerName: string, + options: DeleteIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteIndexerSend(context, indexerName, options); + return _deleteIndexerDeserialize(result); +} + +export function _createOrUpdateIndexerSend( + context: Client, + indexer: SearchIndexer, + indexerName: string, + options: CreateOrUpdateIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}'){?api%2Dversion,ignoreResetRequirements,disableCacheReprocessingChangeDetection}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + ignoreResetRequirements: options?.skipIndexerResetRequirementForCache, + disableCacheReprocessingChangeDetection: options?.disableCacheReprocessingChangeDetection, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerSerializer(indexer), + }); +} + +export async function _createOrUpdateIndexerDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDeserializer(result.body); +} + +/** Creates a new indexer or updates an indexer if it already exists. */ +export async function createOrUpdateIndexer( + context: Client, + indexer: SearchIndexer, + indexerName: string, + options: CreateOrUpdateIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createOrUpdateIndexerSend(context, indexer, indexerName, options); + return _createOrUpdateIndexerDeserialize(result); +} + +export function _runIndexerSend( + context: Client, + indexerName: string, + options: RunIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}')/search.run{?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _runIndexerDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["202"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Runs an indexer on-demand. 
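+ *
+ * @example Hypothetical usage; the promise resolves once the service accepts
+ * the run request with a 202 (placeholder name, not part of this change):
+ * ```ts
+ * await runIndexer(context, "my-indexer");
+ * ```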
+ */
+export async function runIndexer(
+  context: Client,
+  indexerName: string,
+  options: RunIndexerOptionalParams = { requestOptions: {} },
+): Promise<void> {
+  const result = await _runIndexerSend(context, indexerName, options);
+  return _runIndexerDeserialize(result);
+}
+
+export function _resetDocumentsSend(
+  context: Client,
+  indexerName: string,
+  options: ResetDocumentsOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexers('{indexerName}')/search.resetdocs{?api%2Dversion,overwrite}",
+    {
+      indexerName: indexerName,
+      "api%2Dversion": context.apiVersion,
+      overwrite: options?.overwrite,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).post({
+    ...operationOptionsToRequestParameters(options),
+    contentType: "application/json",
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...options.requestOptions?.headers,
+    },
+    body: !options["keysOrIds"]
+      ? options["keysOrIds"]
+      : documentKeysOrIdsSerializer(options["keysOrIds"]),
+  });
+}
+
+export async function _resetDocumentsDeserialize(result: PathUncheckedResponse): Promise<void> {
+  const expectedStatuses = ["204"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return;
+}
+
+/** Resets specific documents in the datasource to be selectively re-ingested by the indexer. */
+export async function resetDocuments(
+  context: Client,
+  indexerName: string,
+  options: ResetDocumentsOptionalParams = { requestOptions: {} },
+): Promise<void> {
+  const result = await _resetDocumentsSend(context, indexerName, options);
+  return _resetDocumentsDeserialize(result);
+}
+
+export function _resyncSend(
+  context: Client,
+  indexerName: string,
+  options: ResyncOptionalParams = { requestOptions: {} },
+): StreamableMethod {
+  const path = expandUrlTemplate(
+    "/indexers('{indexerName}')/search.resync{?api%2Dversion}",
+    {
+      indexerName: indexerName,
+      "api%2Dversion": context.apiVersion,
+    },
+    {
+      allowReserved: options?.requestOptions?.skipUrlEncoding,
+    },
+  );
+  return context.path(path).post({
+    ...operationOptionsToRequestParameters(options),
+    headers: {
+      ...(options?.clientRequestId !== undefined
+        ? { "x-ms-client-request-id": options?.clientRequestId }
+        : {}),
+      ...options.requestOptions?.headers,
+    },
+  });
+}
+
+export async function _resyncDeserialize(result: PathUncheckedResponse): Promise<void> {
+  const expectedStatuses = ["204"];
+  if (!expectedStatuses.includes(result.status)) {
+    const error = createRestError(result);
+    error.details = errorResponseDeserializer(result.body);
+    throw error;
+  }
+
+  return;
+}
+
+/** Resyncs selective options from the datasource to be re-ingested by the indexer.
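+ *
+ * @example Hypothetical usage (placeholder name, not part of this change):
+ * ```ts
+ * await resync(context, "my-indexer");
+ * ```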
*/ +export async function resync( + context: Client, + indexerName: string, + options: ResyncOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _resyncSend(context, indexerName, options); + return _resyncDeserialize(result); +} + +export function _resetIndexerSend( + context: Client, + indexerName: string, + options: ResetIndexerOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/indexers('{indexerName}')/search.reset{?api%2Dversion}", + { + indexerName: indexerName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _resetIndexerDeserialize(result: PathUncheckedResponse): Promise { + const expectedStatuses = ["204"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Resets the change tracking state associated with an indexer. */ +export async function resetIndexer( + context: Client, + indexerName: string, + options: ResetIndexerOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _resetIndexerSend(context, indexerName, options); + return _resetIndexerDeserialize(result); +} + +export function _createDataSourceConnectionSend( + context: Client, + dataSource: SearchIndexerDataSourceConnection, + options: CreateDataSourceConnectionOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources{?api%2Dversion}", + { + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).post({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerDataSourceConnectionSerializer(dataSource), + }); +} + +export async function _createDataSourceConnectionDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDataSourceConnectionDeserializer(result.body); +} + +/** Creates a new datasource. 
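+ *
+ * @example A minimal sketch; the body follows the generated
+ * SearchIndexerDataSourceConnection shape and every value is a placeholder
+ * (illustrative only, not part of this change):
+ * ```ts
+ * const created = await createDataSourceConnection(context, {
+ *   name: "my-datasource",
+ *   type: "azureblob",
+ *   credentials: { connectionString: "<connection-string>" },
+ *   container: { name: "my-container" },
+ * });
+ * ```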
*/ +export async function createDataSourceConnection( + context: Client, + dataSource: SearchIndexerDataSourceConnection, + options: CreateDataSourceConnectionOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createDataSourceConnectionSend(context, dataSource, options); + return _createDataSourceConnectionDeserialize(result); +} + +export function _getDataSourceConnectionsSend( + context: Client, + options: GetDataSourceConnectionsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources{?api%2Dversion,%24select}", + { + "api%2Dversion": context.apiVersion, + "%24select": options?.select, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getDataSourceConnectionsDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return listDataSourcesResultDeserializer(result.body); +} + +/** Lists all datasources available for a search service. */ +export async function getDataSourceConnections( + context: Client, + options: GetDataSourceConnectionsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getDataSourceConnectionsSend(context, options); + return _getDataSourceConnectionsDeserialize(result); +} + +export function _getDataSourceConnectionSend( + context: Client, + dataSourceName: string, + options: GetDataSourceConnectionOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources('{dataSourceName}'){?api%2Dversion}", + { + dataSourceName: dataSourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).get({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + }); +} + +export async function _getDataSourceConnectionDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDataSourceConnectionDeserializer(result.body); +} + +/** Retrieves a datasource definition. 
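+ *
+ * @example Hypothetical usage (placeholder name, not part of this change):
+ * ```ts
+ * const dataSource = await getDataSourceConnection(context, "my-datasource");
+ * console.log(dataSource.type);
+ * ```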
*/ +export async function getDataSourceConnection( + context: Client, + dataSourceName: string, + options: GetDataSourceConnectionOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getDataSourceConnectionSend(context, dataSourceName, options); + return _getDataSourceConnectionDeserialize(result); +} + +export function _deleteDataSourceConnectionSend( + context: Client, + dataSourceName: string, + options: DeleteDataSourceConnectionOptionalParams = { requestOptions: {} }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources('{dataSourceName}'){?api%2Dversion}", + { + dataSourceName: dataSourceName, + "api%2Dversion": context.apiVersion, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).delete({ + ...operationOptionsToRequestParameters(options), + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + ...(options?.clientRequestId !== undefined + ? { "x-ms-client-request-id": options?.clientRequestId } + : {}), + ...options.requestOptions?.headers, + }, + }); +} + +export async function _deleteDataSourceConnectionDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["204", "404"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return; +} + +/** Deletes a datasource. */ +export async function deleteDataSourceConnection( + context: Client, + dataSourceName: string, + options: DeleteDataSourceConnectionOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteDataSourceConnectionSend(context, dataSourceName, options); + return _deleteDataSourceConnectionDeserialize(result); +} + +export function _createOrUpdateDataSourceConnectionSend( + context: Client, + dataSource: SearchIndexerDataSourceConnection, + dataSourceName: string, + options: CreateOrUpdateDataSourceConnectionOptionalParams = { + requestOptions: {}, + }, +): StreamableMethod { + const path = expandUrlTemplate( + "/datasources('{dataSourceName}'){?api%2Dversion,ignoreResetRequirements}", + { + dataSourceName: dataSourceName, + "api%2Dversion": context.apiVersion, + ignoreResetRequirements: options?.skipIndexerResetRequirementForCache, + }, + { + allowReserved: options?.requestOptions?.skipUrlEncoding, + }, + ); + return context.path(path).put({ + ...operationOptionsToRequestParameters(options), + contentType: "application/json", + headers: { + ...(options?.ifMatch !== undefined ? { "If-Match": options?.ifMatch } : {}), + ...(options?.ifNoneMatch !== undefined ? { "If-None-Match": options?.ifNoneMatch } : {}), + prefer: "return=representation", + ...(options?.clientRequestId !== undefined + ? 
{ "x-ms-client-request-id": options?.clientRequestId } + : {}), + accept: "application/json", + ...options.requestOptions?.headers, + }, + body: searchIndexerDataSourceConnectionSerializer(dataSource), + }); +} + +export async function _createOrUpdateDataSourceConnectionDeserialize( + result: PathUncheckedResponse, +): Promise { + const expectedStatuses = ["200", "201"]; + if (!expectedStatuses.includes(result.status)) { + const error = createRestError(result); + error.details = errorResponseDeserializer(result.body); + throw error; + } + + return searchIndexerDataSourceConnectionDeserializer(result.body); +} + +/** Creates a new datasource or updates a datasource if it already exists. */ +export async function createOrUpdateDataSourceConnection( + context: Client, + dataSource: SearchIndexerDataSourceConnection, + dataSourceName: string, + options: CreateOrUpdateDataSourceConnectionOptionalParams = { + requestOptions: {}, + }, +): Promise { + const result = await _createOrUpdateDataSourceConnectionSend( + context, + dataSource, + dataSourceName, + options, + ); + return _createOrUpdateDataSourceConnectionDeserialize(result); +} diff --git a/sdk/search/search-documents/src/searchIndexer/api/options.ts b/sdk/search/search-documents/src/searchIndexer/api/options.ts new file mode 100644 index 000000000000..cbfff4b16893 --- /dev/null +++ b/sdk/search/search-documents/src/searchIndexer/api/options.ts @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { DocumentKeysOrIds } from "../../models/azure/search/documents/indexes/models.js"; +import { OperationOptions } from "@azure-rest/core-client"; + +/** Optional parameters. */ +export interface ResetSkillsOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateSkillsetOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetSkillsetsOptionalParams extends OperationOptions { + /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetSkillsetOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteSkillsetOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateSkillsetOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. 
*/ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** Ignores cache reset requirements. */ + skipIndexerResetRequirementForCache?: boolean; + /** Disables cache reprocessing change detection. */ + disableCacheReprocessingChangeDetection?: boolean; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetIndexerStatusOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateIndexerOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetIndexersOptionalParams extends OperationOptions { + /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetIndexerOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteIndexerOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateIndexerOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** Ignores cache reset requirements. */ + skipIndexerResetRequirementForCache?: boolean; + /** Disables cache reprocessing change detection. */ + disableCacheReprocessingChangeDetection?: boolean; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface RunIndexerOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ResetDocumentsOptionalParams extends OperationOptions { + /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */ + overwrite?: boolean; + /** The keys or ids of the documents to be re-ingested. If keys are provided, the document key field must be specified in the indexer configuration. 
If ids are provided, the document key field is ignored. */ + keysOrIds?: DocumentKeysOrIds; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ResyncOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface ResetIndexerOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateDataSourceConnectionOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetDataSourceConnectionsOptionalParams extends OperationOptions { + /** Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */ + select?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface GetDataSourceConnectionOptionalParams extends OperationOptions { + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface DeleteDataSourceConnectionOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} + +/** Optional parameters. */ +export interface CreateOrUpdateDataSourceConnectionOptionalParams extends OperationOptions { + /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ + ifMatch?: string; + /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ + ifNoneMatch?: string; + /** Ignores cache reset requirements. */ + skipIndexerResetRequirementForCache?: boolean; + /** An opaque, globally-unique, client-generated string identifier for the request. */ + clientRequestId?: string; +} diff --git a/sdk/search/search-documents/src/searchIndexer/api/searchIndexerContext.ts b/sdk/search/search-documents/src/searchIndexer/api/searchIndexerContext.ts new file mode 100644 index 000000000000..83dd21a71ee2 --- /dev/null +++ b/sdk/search/search-documents/src/searchIndexer/api/searchIndexerContext.ts @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { logger } from "../../logger.js"; +import { KnownVersions } from "../../models/models.js"; +import { Client, ClientOptions, getClient } from "@azure-rest/core-client"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; + +export interface SearchIndexerContext extends Client { + /** The API version to use for this operation. 
*/ + /** Known values of {@link KnownVersions} that the service accepts. */ + apiVersion: string; +} + +/** Optional parameters for the client. */ +export interface SearchIndexerClientOptionalParams extends ClientOptions { + /** The API version to use for this operation. */ + /** Known values of {@link KnownVersions} that the service accepts. */ + apiVersion?: string; +} + +export function createSearchIndexer( + endpointParam: string, + credential: KeyCredential | TokenCredential, + options: SearchIndexerClientOptionalParams = {}, +): SearchIndexerContext { + const endpointUrl = options.endpoint ?? String(endpointParam); + const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix; + const userAgentInfo = `azsdk-js-search-documents/12.3.0-beta.1`; + const userAgentPrefix = prefixFromOptions + ? `${prefixFromOptions} azsdk-js-api ${userAgentInfo}` + : `azsdk-js-api ${userAgentInfo}`; + const { apiVersion: _, ...updatedOptions } = { + ...options, + userAgentOptions: { userAgentPrefix }, + loggingOptions: { logger: options.loggingOptions?.logger ?? logger.info }, + credentials: { + scopes: options.credentials?.scopes ?? ["https://search.azure.com/.default"], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", + }, + }; + const clientContext = getClient(endpointUrl, credential, updatedOptions); + clientContext.pipeline.removePolicy({ name: "ApiVersionPolicy" }); + const apiVersion = options.apiVersion ?? "2025-11-01-preview"; + clientContext.pipeline.addPolicy({ + name: "ClientApiVersionPolicy", + sendRequest: (req, next) => { + // Use the apiVersion defined in request url directly + // Append one if there is no apiVersion and we have one at client options + const url = new URL(req.url); + if (!url.searchParams.get("api-version")) { + req.url = `${req.url}${ + Array.from(url.searchParams.keys()).length > 0 ? "&" : "?" + }api-version=${apiVersion}`; + } + + return next(req); + }, + }); + return { ...clientContext, apiVersion } as SearchIndexerContext; +} diff --git a/sdk/search/search-documents/src/searchIndexer/index.ts b/sdk/search/search-documents/src/searchIndexer/index.ts new file mode 100644 index 000000000000..79ebbbea79d6 --- /dev/null +++ b/sdk/search/search-documents/src/searchIndexer/index.ts @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
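The `ClientApiVersionPolicy` registered inside `createSearchIndexer` above leaves any `api-version` already present on a request URL untouched and only appends the client default otherwise. A minimal standalone sketch of that decision logic, for illustration only (`appendApiVersion` is a hypothetical helper name, not part of the generated code):

```ts
// Sketch of the api-version fallback used by ClientApiVersionPolicy above.
function appendApiVersion(requestUrl: string, apiVersion: string): string {
  const url = new URL(requestUrl);
  // A version pinned on the request itself always wins.
  if (url.searchParams.get("api-version")) {
    return requestUrl;
  }
  // Otherwise append the client default, reusing "&" when a query already exists.
  const separator = Array.from(url.searchParams.keys()).length > 0 ? "&" : "?";
  return `${requestUrl}${separator}api-version=${apiVersion}`;
}

// Both calls end up carrying the client default version:
appendApiVersion("https://example.search.windows.net/indexers", "2025-11-01-preview");
appendApiVersion("https://example.search.windows.net/indexers?$select=name", "2025-11-01-preview");
```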
+ +export { SearchIndexerClient } from "./searchIndexerClient.js"; +export { + ResetSkillsOptionalParams, + CreateSkillsetOptionalParams, + GetSkillsetsOptionalParams, + GetSkillsetOptionalParams, + DeleteSkillsetOptionalParams, + CreateOrUpdateSkillsetOptionalParams, + GetIndexerStatusOptionalParams, + CreateIndexerOptionalParams, + GetIndexersOptionalParams, + GetIndexerOptionalParams, + DeleteIndexerOptionalParams, + CreateOrUpdateIndexerOptionalParams, + RunIndexerOptionalParams, + ResetDocumentsOptionalParams, + ResyncOptionalParams, + ResetIndexerOptionalParams, + CreateDataSourceConnectionOptionalParams, + GetDataSourceConnectionsOptionalParams, + GetDataSourceConnectionOptionalParams, + DeleteDataSourceConnectionOptionalParams, + CreateOrUpdateDataSourceConnectionOptionalParams, + SearchIndexerContext, + SearchIndexerClientOptionalParams, +} from "./api/index.js"; diff --git a/sdk/search/search-documents/src/searchIndexer/searchIndexerClient.ts b/sdk/search/search-documents/src/searchIndexer/searchIndexerClient.ts new file mode 100644 index 000000000000..3b7ebc8bf189 --- /dev/null +++ b/sdk/search/search-documents/src/searchIndexer/searchIndexerClient.ts @@ -0,0 +1,261 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +import { + createSearchIndexer, + SearchIndexerContext, + SearchIndexerClientOptionalParams, +} from "./api/index.js"; +import { + SearchIndexerDataSourceConnection, + ListDataSourcesResult, + SearchIndexer, + ListIndexersResult, + SearchIndexerStatus, + SearchIndexerSkillset, + ListSkillsetsResult, + SkillNames, +} from "../models/azure/search/documents/indexes/models.js"; +import { + resetSkills, + createSkillset, + getSkillsets, + getSkillset, + deleteSkillset, + createOrUpdateSkillset, + getIndexerStatus, + createIndexer, + getIndexers, + getIndexer, + deleteIndexer, + createOrUpdateIndexer, + runIndexer, + resetDocuments, + resync, + resetIndexer, + createDataSourceConnection, + getDataSourceConnections, + getDataSourceConnection, + deleteDataSourceConnection, + createOrUpdateDataSourceConnection, +} from "./api/operations.js"; +import { + ResetSkillsOptionalParams, + CreateSkillsetOptionalParams, + GetSkillsetsOptionalParams, + GetSkillsetOptionalParams, + DeleteSkillsetOptionalParams, + CreateOrUpdateSkillsetOptionalParams, + GetIndexerStatusOptionalParams, + CreateIndexerOptionalParams, + GetIndexersOptionalParams, + GetIndexerOptionalParams, + DeleteIndexerOptionalParams, + CreateOrUpdateIndexerOptionalParams, + RunIndexerOptionalParams, + ResetDocumentsOptionalParams, + ResyncOptionalParams, + ResetIndexerOptionalParams, + CreateDataSourceConnectionOptionalParams, + GetDataSourceConnectionsOptionalParams, + GetDataSourceConnectionOptionalParams, + DeleteDataSourceConnectionOptionalParams, + CreateOrUpdateDataSourceConnectionOptionalParams, +} from "./api/options.js"; +import { KeyCredential, TokenCredential } from "@azure/core-auth"; +import { Pipeline } from "@azure/core-rest-pipeline"; + +export { SearchIndexerClientOptionalParams } from "./api/searchIndexerContext.js"; + +export class SearchIndexerClient { + private _client: SearchIndexerContext; + /** The pipeline used by this client to make requests */ + public readonly pipeline: Pipeline; + + constructor( + endpointParam: string, + credential: KeyCredential | TokenCredential, + options: SearchIndexerClientOptionalParams = {}, + ) { + const prefixFromOptions = options?.userAgentOptions?.userAgentPrefix; + const userAgentPrefix = prefixFromOptions + ? 
`${prefixFromOptions} azsdk-js-client` + : `azsdk-js-client`; + this._client = createSearchIndexer(endpointParam, credential, { + ...options, + userAgentOptions: { userAgentPrefix }, + }); + this.pipeline = this._client.pipeline; + } + + /** Reset an existing skillset in a search service. */ + resetSkills( + skillNames: SkillNames, + skillsetName: string, + options: ResetSkillsOptionalParams = { requestOptions: {} }, + ): Promise { + return resetSkills(this._client, skillNames, skillsetName, options); + } + + /** Creates a new skillset in a search service. */ + createSkillset( + skillset: SearchIndexerSkillset, + options: CreateSkillsetOptionalParams = { requestOptions: {} }, + ): Promise { + return createSkillset(this._client, skillset, options); + } + + /** List all skillsets in a search service. */ + getSkillsets( + options: GetSkillsetsOptionalParams = { requestOptions: {} }, + ): Promise { + return getSkillsets(this._client, options); + } + + /** Retrieves a skillset in a search service. */ + getSkillset( + skillsetName: string, + options: GetSkillsetOptionalParams = { requestOptions: {} }, + ): Promise { + return getSkillset(this._client, skillsetName, options); + } + + /** Deletes a skillset in a search service. */ + deleteSkillset( + skillsetName: string, + options: DeleteSkillsetOptionalParams = { requestOptions: {} }, + ): Promise { + return deleteSkillset(this._client, skillsetName, options); + } + + /** Creates a new skillset in a search service or updates the skillset if it already exists. */ + createOrUpdateSkillset( + skillset: SearchIndexerSkillset, + skillsetName: string, + options: CreateOrUpdateSkillsetOptionalParams = { requestOptions: {} }, + ): Promise { + return createOrUpdateSkillset(this._client, skillset, skillsetName, options); + } + + /** Returns the current status and execution history of an indexer. */ + getIndexerStatus( + indexerName: string, + options: GetIndexerStatusOptionalParams = { requestOptions: {} }, + ): Promise { + return getIndexerStatus(this._client, indexerName, options); + } + + /** Creates a new indexer. */ + createIndexer( + indexer: SearchIndexer, + options: CreateIndexerOptionalParams = { requestOptions: {} }, + ): Promise { + return createIndexer(this._client, indexer, options); + } + + /** Lists all indexers available for a search service. */ + getIndexers( + options: GetIndexersOptionalParams = { requestOptions: {} }, + ): Promise { + return getIndexers(this._client, options); + } + + /** Retrieves an indexer definition. */ + getIndexer( + indexerName: string, + options: GetIndexerOptionalParams = { requestOptions: {} }, + ): Promise { + return getIndexer(this._client, indexerName, options); + } + + /** Deletes an indexer. */ + deleteIndexer( + indexerName: string, + options: DeleteIndexerOptionalParams = { requestOptions: {} }, + ): Promise { + return deleteIndexer(this._client, indexerName, options); + } + + /** Creates a new indexer or updates an indexer if it already exists. */ + createOrUpdateIndexer( + indexer: SearchIndexer, + indexerName: string, + options: CreateOrUpdateIndexerOptionalParams = { requestOptions: {} }, + ): Promise { + return createOrUpdateIndexer(this._client, indexer, indexerName, options); + } + + /** Runs an indexer on-demand. */ + runIndexer( + indexerName: string, + options: RunIndexerOptionalParams = { requestOptions: {} }, + ): Promise { + return runIndexer(this._client, indexerName, options); + } + + /** Resets specific documents in the datasource to be selectively re-ingested by the indexer. 
 */
+  resetDocuments(
+    indexerName: string,
+    options: ResetDocumentsOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return resetDocuments(this._client, indexerName, options);
+  }
+
+  /** Resync selective options from the datasource to be re-ingested by the indexer. */
+  resync(
+    indexerName: string,
+    options: ResyncOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return resync(this._client, indexerName, options);
+  }
+
+  /** Resets the change tracking state associated with an indexer. */
+  resetIndexer(
+    indexerName: string,
+    options: ResetIndexerOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return resetIndexer(this._client, indexerName, options);
+  }
+
+  /** Creates a new datasource. */
+  createDataSourceConnection(
+    dataSource: SearchIndexerDataSourceConnection,
+    options: CreateDataSourceConnectionOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexerDataSourceConnection> {
+    return createDataSourceConnection(this._client, dataSource, options);
+  }
+
+  /** Lists all datasources available for a search service. */
+  getDataSourceConnections(
+    options: GetDataSourceConnectionsOptionalParams = { requestOptions: {} },
+  ): Promise<ListDataSourcesResult> {
+    return getDataSourceConnections(this._client, options);
+  }
+
+  /** Retrieves a datasource definition. */
+  getDataSourceConnection(
+    dataSourceName: string,
+    options: GetDataSourceConnectionOptionalParams = { requestOptions: {} },
+  ): Promise<SearchIndexerDataSourceConnection> {
+    return getDataSourceConnection(this._client, dataSourceName, options);
+  }
+
+  /** Deletes a datasource. */
+  deleteDataSourceConnection(
+    dataSourceName: string,
+    options: DeleteDataSourceConnectionOptionalParams = { requestOptions: {} },
+  ): Promise<void> {
+    return deleteDataSourceConnection(this._client, dataSourceName, options);
+  }
+
+  /** Creates a new datasource or updates a datasource if it already exists.
*/ + createOrUpdateDataSourceConnection( + dataSource: SearchIndexerDataSourceConnection, + dataSourceName: string, + options: CreateOrUpdateDataSourceConnectionOptionalParams = { + requestOptions: {}, + }, + ): Promise { + return createOrUpdateDataSourceConnection(this._client, dataSource, dataSourceName, options); + } +} diff --git a/sdk/search/search-documents/src/searchIndexerClient.ts b/sdk/search/search-documents/src/searchIndexerClient.ts index 061887a9da18..2b8eb8378a74 100644 --- a/sdk/search/search-documents/src/searchIndexerClient.ts +++ b/sdk/search/search-documents/src/searchIndexerClient.ts @@ -3,12 +3,14 @@ import type { KeyCredential, TokenCredential } from "@azure/core-auth"; import { isTokenCredential } from "@azure/core-auth"; -import type { InternalClientPipelineOptions } from "@azure/core-client"; -import type { ExtendedCommonClientOptions } from "@azure/core-http-compat"; import type { Pipeline } from "@azure/core-rest-pipeline"; -import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline"; -import type { SearchIndexerStatus } from "./generated/service/models/index.js"; -import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient.js"; +import { + bearerTokenAuthenticationPolicy, + bearerTokenAuthenticationPolicyName, +} from "@azure/core-rest-pipeline"; +import type { SearchIndexerStatus } from "./models/azure/search/documents/indexes/index.js"; +import type { SearchIndexerClientOptionalParams } from "./searchIndexer/searchIndexerClient.js"; +import { SearchIndexerClient as GeneratedClient } from "./searchIndexer/searchIndexerClient.js"; import { logger } from "./logger.js"; import { createOdataMetadataPolicy } from "./odataMetadataPolicy.js"; import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy.js"; @@ -39,12 +41,13 @@ import type { SearchIndexerSkillset, } from "./serviceModels.js"; import * as utils from "./serviceUtils.js"; -import { createSpan } from "./tracing.js"; +import { tracingClient } from "./tracing.js"; +import type { ClientOptions } from "@azure-rest/core-client"; /** * Client options used to configure AI Search API requests. */ -export interface SearchIndexerClientOptions extends ExtendedCommonClientOptions { +export interface SearchIndexerClientOptions extends ClientOptions { /** * The API version to use when communicating with the service. * @deprecated use {@Link serviceVersion} instead @@ -117,8 +120,9 @@ export class SearchIndexerClient { ) { this.endpoint = endpoint; - const internalClientPipelineOptions: InternalClientPipelineOptions = { + const internalClientPipelineOptions: SearchIndexerClientOptionalParams = { ...options, + apiVersion: options.serviceVersion ?? options.apiVersion ?? utils.defaultServiceVersion, ...{ loggingOptions: { logger: logger.info, @@ -134,17 +138,14 @@ export class SearchIndexerClient { }, }; - this.serviceVersion = - options.serviceVersion ?? options.apiVersion ?? 
utils.defaultServiceVersion;
-    this.apiVersion = this.serviceVersion;
+    this.serviceVersion = this.apiVersion =
+      options.serviceVersion ?? options.apiVersion ?? utils.defaultServiceVersion;

-    this.client = new GeneratedClient(
-      this.endpoint,
-      this.serviceVersion,
-      internalClientPipelineOptions,
-    );
+    this.client = new GeneratedClient(this.endpoint, credential, internalClientPipelineOptions);
     this.pipeline = this.client.pipeline;

+    // TODO: consider leaving the policy in-place instead of removing and re-adding
+    this.pipeline.removePolicy({ name: bearerTokenAuthenticationPolicyName });
+
     if (isTokenCredential(credential)) {
       const scope: string = options.audience
         ? `${options.audience}/.default`
@@ -165,19 +166,14 @@ export class SearchIndexerClient {
    * @param options - Options to the list indexers operation.
    */
   public async listIndexers(options: ListIndexersOptions = {}): Promise<Array<SearchIndexer>> {
-    const { span, updatedOptions } = createSpan("SearchIndexerClient-listIndexers", options);
-    try {
-      const result = await this.client.indexers.list(updatedOptions);
-      return result.indexers.map(utils.generatedSearchIndexerToPublicSearchIndexer);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexerClient-listIndexers",
+      options,
+      async (updatedOptions) => {
+        const result = await this.client.getIndexers(updatedOptions);
+        return result.indexers.map(utils.generatedSearchIndexerToPublicSearchIndexer);
+      },
+    );
   }

   /**
@@ -186,22 +182,17 @@
    */
   // eslint-disable-next-line @azure/azure-sdk/ts-naming-options
   public async listIndexersNames(options: ListIndexersOptions = {}): Promise<Array<string>> {
-    const { span, updatedOptions } = createSpan("SearchIndexerClient-listIndexersNames", options);
-    try {
-      const result = await this.client.indexers.list({
-        ...updatedOptions,
-        select: "name",
-      });
-      return result.indexers.map((idx) => idx.name);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexerClient-listIndexersNames",
+      options,
+      async (updatedOptions) => {
+        const result = await this.client.getIndexers({
+          ...updatedOptions,
+          select: "name",
+        });
+        return result.indexers.map((idx) => idx.name);
+      },
+    );
   }

   /**
@@ -211,22 +202,14 @@
   public async listDataSourceConnections(
     options: ListDataSourceConnectionsOptions = {},
   ): Promise<Array<SearchIndexerDataSourceConnection>> {
-    const { span, updatedOptions } = createSpan(
+    return tracingClient.withSpan(
       "SearchIndexerClient-listDataSourceConnections",
       options,
+      async (updatedOptions) => {
+        const result = await this.client.getDataSourceConnections(updatedOptions);
+        return result.dataSources.map(utils.generatedDataSourceToPublicDataSource);
+      },
     );
-    try {
-      const result = await this.client.dataSources.list(updatedOptions);
-      return result.dataSources.map(utils.generatedDataSourceToPublicDataSource);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
   }

   /**
@@ -237,25 +220,17 @@
     // eslint-disable-next-line @azure/azure-sdk/ts-naming-options
     options: ListDataSourceConnectionsOptions = {},
   ): Promise<Array<string>> {
-    const { span, updatedOptions } = createSpan(
+    return tracingClient.withSpan(
       "SearchIndexerClient-listDataSourceConnectionsNames",
       options,
+      async (updatedOptions) => {
+        const result = await this.client.getDataSourceConnections({
+
...updatedOptions, + select: "name", + }); + return result.dataSources.map((ds) => ds.name); + }, ); - try { - const result = await this.client.dataSources.list({ - ...updatedOptions, - select: "name", - }); - return result.dataSources.map((ds) => ds.name); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } } /** @@ -265,19 +240,14 @@ export class SearchIndexerClient { public async listSkillsets( options: ListSkillsetsOptions = {}, ): Promise> { - const { span, updatedOptions } = createSpan("SearchIndexerClient-listSkillsets", options); - try { - const result = await this.client.skillsets.list(updatedOptions); - return result.skillsets.map(utils.generatedSkillsetToPublicSkillset); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-listSkillsets", + options, + async (updatedOptions) => { + const result = await this.client.getSkillsets(updatedOptions); + return result.skillsets.map(utils.generatedSkillsetToPublicSkillset); + }, + ); } /** @@ -286,22 +256,17 @@ export class SearchIndexerClient { */ // eslint-disable-next-line @azure/azure-sdk/ts-naming-options public async listSkillsetsNames(options: ListSkillsetsOptions = {}): Promise> { - const { span, updatedOptions } = createSpan("SearchIndexerClient-listSkillsetsNames", options); - try { - const result = await this.client.skillsets.list({ - ...updatedOptions, - select: "name", - }); - return result.skillsets.map((sks) => sks.name); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-listSkillsetsNames", + options, + async (updatedOptions) => { + const result = await this.client.getSkillsets({ + ...updatedOptions, + select: "name", + }); + return result.skillsets.map((sks) => sks.name); + }, + ); } /** @@ -313,19 +278,14 @@ export class SearchIndexerClient { indexerName: string, options: GetIndexerOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexerClient-getIndexer", options); - try { - const result = await this.client.indexers.get(indexerName, updatedOptions); - return utils.generatedSearchIndexerToPublicSearchIndexer(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-getIndexer", + options, + async (updatedOptions) => { + const result = await this.client.getIndexer(indexerName, updatedOptions); + return utils.generatedSearchIndexerToPublicSearchIndexer(result); + }, + ); } /** @@ -337,22 +297,17 @@ export class SearchIndexerClient { dataSourceConnectionName: string, options: GetDataSourceConnectionOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan( + return tracingClient.withSpan( "SearchIndexerClient-getDataSourceConnection", options, + async (updatedOptions) => { + const result = await this.client.getDataSourceConnection( + dataSourceConnectionName, + updatedOptions, + ); + return utils.generatedDataSourceToPublicDataSource(result); + }, ); - try { - const result = await this.client.dataSources.get(dataSourceConnectionName, updatedOptions); - return utils.generatedDataSourceToPublicDataSource(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } 
finally { - span.end(); - } } /** @@ -364,19 +319,14 @@ export class SearchIndexerClient { skillsetName: string, options: GetSkillSetOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexerClient-getSkillset", options); - try { - const result = await this.client.skillsets.get(skillsetName, updatedOptions); - return utils.generatedSkillsetToPublicSkillset(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-getSkillset", + options, + async (updatedOptions) => { + const result = await this.client.getSkillset(skillsetName, updatedOptions); + return utils.generatedSkillsetToPublicSkillset(result); + }, + ); } /** @@ -388,22 +338,17 @@ export class SearchIndexerClient { indexer: SearchIndexer, options: CreateIndexerOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexerClient-createIndexer", options); - try { - const result = await this.client.indexers.create( - utils.publicSearchIndexerToGeneratedSearchIndexer(indexer), - updatedOptions, - ); - return utils.generatedSearchIndexerToPublicSearchIndexer(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-createIndexer", + options, + async (updatedOptions) => { + const result = await this.client.createIndexer( + utils.publicSearchIndexerToGeneratedSearchIndexer(indexer), + updatedOptions, + ); + return utils.generatedSearchIndexerToPublicSearchIndexer(result); + }, + ); } /** @@ -415,25 +360,17 @@ export class SearchIndexerClient { dataSourceConnection: SearchIndexerDataSourceConnection, options: CreateDataSourceConnectionOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan( + return tracingClient.withSpan( "SearchIndexerClient-createDataSourceConnection", options, + async (updatedOptions) => { + const result = await this.client.createDataSourceConnection( + utils.publicDataSourceToGeneratedDataSource(dataSourceConnection), + updatedOptions, + ); + return utils.generatedDataSourceToPublicDataSource(result); + }, ); - try { - const result = await this.client.dataSources.create( - utils.publicDataSourceToGeneratedDataSource(dataSourceConnection), - updatedOptions, - ); - return utils.generatedDataSourceToPublicDataSource(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } } /** @@ -445,22 +382,17 @@ export class SearchIndexerClient { skillset: SearchIndexerSkillset, options: CreateSkillsetOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexerClient-createSkillset", options); - try { - const result = await this.client.skillsets.create( - utils.publicSkillsetToGeneratedSkillset(skillset), - updatedOptions, - ); - return utils.generatedSkillsetToPublicSkillset(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-createSkillset", + options, + async (updatedOptions) => { + const result = await this.client.createSkillset( + utils.publicSkillsetToGeneratedSkillset(skillset), + updatedOptions, + ); + return utils.generatedSkillsetToPublicSkillset(result); + }, + ); } /** @@ -472,33 +404,23 @@ export class SearchIndexerClient { indexer: 
SearchIndexer, options: CreateorUpdateIndexerOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan( + return tracingClient.withSpan( "SearchIndexerClient-createOrUpdateIndexer", options, + async (updatedOptions) => { + const { onlyIfUnchanged, ...restOptions } = updatedOptions; + const etag = onlyIfUnchanged ? indexer.etag : undefined; + const result = await this.client.createOrUpdateIndexer( + utils.publicSearchIndexerToGeneratedSearchIndexer(indexer), + indexer.name, + { + ...restOptions, + ifMatch: etag, + }, + ); + return utils.generatedSearchIndexerToPublicSearchIndexer(result); + }, ); - - const { onlyIfUnchanged, ...restOptions } = updatedOptions; - try { - const etag = onlyIfUnchanged ? indexer.etag : undefined; - - const result = await this.client.indexers.createOrUpdate( - indexer.name, - utils.publicSearchIndexerToGeneratedSearchIndexer(indexer), - { - ...restOptions, - ifMatch: etag, - }, - ); - return utils.generatedSearchIndexerToPublicSearchIndexer(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } } /** @@ -510,31 +432,22 @@ export class SearchIndexerClient { dataSourceConnection: SearchIndexerDataSourceConnection, options: CreateorUpdateDataSourceConnectionOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan( + return tracingClient.withSpan( "SearchIndexerClient-createOrUpdateDataSourceConnection", options, + async (updatedOptions) => { + const etag = options.onlyIfUnchanged ? dataSourceConnection.etag : undefined; + const result = await this.client.createOrUpdateDataSourceConnection( + utils.publicDataSourceToGeneratedDataSource(dataSourceConnection), + dataSourceConnection.name, + { + ...updatedOptions, + ifMatch: etag, + }, + ); + return utils.generatedDataSourceToPublicDataSource(result); + }, ); - try { - const etag = options.onlyIfUnchanged ? dataSourceConnection.etag : undefined; - - const result = await this.client.dataSources.createOrUpdate( - dataSourceConnection.name, - utils.publicDataSourceToGeneratedDataSource(dataSourceConnection), - { - ...updatedOptions, - ifMatch: etag, - }, - ); - return utils.generatedDataSourceToPublicDataSource(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } } /** @@ -546,32 +459,22 @@ export class SearchIndexerClient { skillset: SearchIndexerSkillset, options: CreateOrUpdateSkillsetOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan( + return tracingClient.withSpan( "SearchIndexerClient-createOrUpdateSkillset", options, + async (updatedOptions) => { + const etag = options.onlyIfUnchanged ? skillset.etag : undefined; + const result = await this.client.createOrUpdateSkillset( + utils.publicSkillsetToGeneratedSkillset(skillset), + skillset.name, + { + ...updatedOptions, + ifMatch: etag, + }, + ); + return utils.generatedSkillsetToPublicSkillset(result); + }, ); - try { - const etag = options.onlyIfUnchanged ? 
skillset.etag : undefined; - - const result = await this.client.skillsets.createOrUpdate( - skillset.name, - utils.publicSkillsetToGeneratedSkillset(skillset), - { - ...updatedOptions, - ifMatch: etag, - }, - ); - - return utils.generatedSkillsetToPublicSkillset(result); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } } /** @@ -583,29 +486,24 @@ export class SearchIndexerClient { indexer: string | SearchIndexer, options: DeleteIndexerOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexerClient-deleteIndexer", options); - try { - const indexerName: string = typeof indexer === "string" ? indexer : indexer.name; - const etag = - typeof indexer === "string" - ? undefined - : options.onlyIfUnchanged - ? indexer.etag - : undefined; - - await this.client.indexers.delete(indexerName, { - ...updatedOptions, - ifMatch: etag, - }); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-deleteIndexer", + options, + async (updatedOptions) => { + const indexerName: string = typeof indexer === "string" ? indexer : indexer.name; + const etag = + typeof indexer === "string" + ? undefined + : options.onlyIfUnchanged + ? indexer.etag + : undefined; + + await this.client.deleteIndexer(indexerName, { + ...updatedOptions, + ifMatch: etag, + }); + }, + ); } /** @@ -617,33 +515,27 @@ export class SearchIndexerClient { dataSourceConnection: string | SearchIndexerDataSourceConnection, options: DeleteDataSourceConnectionOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan( + return tracingClient.withSpan( "SearchIndexerClient-deleteDataSourceConnection", options, + async (updatedOptions) => { + const dataSourceConnectionName: string = + typeof dataSourceConnection === "string" + ? dataSourceConnection + : dataSourceConnection.name; + const etag = + typeof dataSourceConnection === "string" + ? undefined + : options.onlyIfUnchanged + ? dataSourceConnection.etag + : undefined; + + await this.client.deleteDataSourceConnection(dataSourceConnectionName, { + ...updatedOptions, + ifMatch: etag, + }); + }, ); - try { - const dataSourceConnectionName: string = - typeof dataSourceConnection === "string" ? dataSourceConnection : dataSourceConnection.name; - const etag = - typeof dataSourceConnection === "string" - ? undefined - : options.onlyIfUnchanged - ? dataSourceConnection.etag - : undefined; - - await this.client.dataSources.delete(dataSourceConnectionName, { - ...updatedOptions, - ifMatch: etag, - }); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } } /** @@ -655,29 +547,24 @@ export class SearchIndexerClient { skillset: string | SearchIndexerSkillset, options: DeleteSkillsetOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexerClient-deleteSkillset", options); - try { - const skillsetName: string = typeof skillset === "string" ? skillset : skillset.name; - const etag = - typeof skillset === "string" - ? undefined - : options.onlyIfUnchanged - ? 
skillset.etag - : undefined; - - await this.client.skillsets.delete(skillsetName, { - ...updatedOptions, - ifMatch: etag, - }); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-deleteSkillset", + options, + async (updatedOptions) => { + const skillsetName: string = typeof skillset === "string" ? skillset : skillset.name; + const etag = + typeof skillset === "string" + ? undefined + : options.onlyIfUnchanged + ? skillset.etag + : undefined; + + await this.client.deleteSkillset(skillsetName, { + ...updatedOptions, + ifMatch: etag, + }); + }, + ); } /** @@ -689,19 +576,13 @@ export class SearchIndexerClient { indexerName: string, options: GetIndexerStatusOptions = {}, ): Promise { - const { span, updatedOptions } = createSpan("SearchIndexerClient-getIndexerStatus", options); - try { - const result = await this.client.indexers.getStatus(indexerName, updatedOptions); - return result; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-getIndexerStatus", + options, + async (updatedOptions) => { + return this.client.getIndexerStatus(indexerName, updatedOptions); + }, + ); } /** @@ -710,18 +591,13 @@ export class SearchIndexerClient { * @param options - Additional optional arguments. */ public async resetIndexer(indexerName: string, options: ResetIndexerOptions = {}): Promise { - const { span, updatedOptions } = createSpan("SearchIndexerClient-resetIndexer", options); - try { - await this.client.indexers.reset(indexerName, updatedOptions); - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } + return tracingClient.withSpan( + "SearchIndexerClient-resetIndexer", + options, + async (updatedOptions) => { + await this.client.resetIndexer(indexerName, updatedOptions); + }, + ); } /** @@ -730,18 +606,13 @@ export class SearchIndexerClient { * @param options - Additional optional arguments. 
 */
   public async runIndexer(indexerName: string, options: RunIndexerOptions = {}): Promise<void> {
-    const { span, updatedOptions } = createSpan("SearchIndexerClient-runIndexer", options);
-    try {
-      await this.client.indexers.run(indexerName, updatedOptions);
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexerClient-runIndexer",
+      options,
+      async (updatedOptions) => {
+        await this.client.runIndexer(indexerName, updatedOptions);
+      },
+    );
   }

   /**
@@ -753,24 +624,19 @@
     indexerName: string,
     options: ResetDocumentsOptions = {},
   ): Promise<void> {
-    const { span, updatedOptions } = createSpan("SearchIndexerClient-resetDocs", options);
-    try {
-      await this.client.indexers.resetDocs(indexerName, {
-        ...updatedOptions,
-        keysOrIds: {
-          documentKeys: updatedOptions.documentKeys,
-          datasourceDocumentIds: updatedOptions.datasourceDocumentIds,
-        },
-      });
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexerClient-resetDocs",
+      options,
+      async (updatedOptions) => {
+        await this.client.resetDocuments(indexerName, {
+          ...updatedOptions,
+          keysOrIds: {
+            documentKeys: updatedOptions.documentKeys,
+            datasourceDocumentIds: updatedOptions.datasourceDocumentIds,
+          },
+        });
+      },
+    );
   }

   /**
@@ -780,21 +646,16 @@
    * @param options - The options parameters.
    */
   public async resetSkills(skillsetName: string, options: ResetSkillsOptions = {}): Promise<void> {
-    const { span, updatedOptions } = createSpan("SearchIndexerClient-resetSkills", options);
-    try {
-      await this.client.skillsets.resetSkills(
-        skillsetName,
-        { skillNames: options.skillNames },
-        updatedOptions,
-      );
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
+    return tracingClient.withSpan(
+      "SearchIndexerClient-resetSkills",
+      options,
+      async (updatedOptions) => {
+        await this.client.resetSkills(
+          { skillNames: options.skillNames },
+          skillsetName,
+          updatedOptions,
+        );
+      },
+    );
   }
 }
diff --git a/sdk/search/search-documents/src/searchIndexingBufferedSender.ts b/sdk/search/search-documents/src/searchIndexingBufferedSender.ts
index 8f163e76c7c1..9f81a36aa2ab 100644
--- a/sdk/search/search-documents/src/searchIndexingBufferedSender.ts
+++ b/sdk/search/search-documents/src/searchIndexingBufferedSender.ts
@@ -1,11 +1,11 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.

-import type { OperationOptions } from "@azure/core-client";
+import type { OperationOptions } from "@azure-rest/core-client";
 import type { RestError } from "@azure/core-rest-pipeline";
 import { delay } from "@azure/core-util";
 import EventEmitter from "node:events";
-import type { IndexDocumentsResult } from "./generated/data/models/index.js";
+import type { IndexDocumentsResult } from "./models/azure/search/documents/index.js";
 import { IndexDocumentsBatch } from "./indexDocumentsBatch.js";
 import type {
   IndexDocumentsAction,
diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts
index fa63678d0d5a..5a5f283515eb 100644
--- a/sdk/search/search-documents/src/serviceModels.ts
+++ b/sdk/search/search-documents/src/serviceModels.ts
@@ -1,11 +1,11 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
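Every converted method in searchIndexerClient.ts above follows the same shape: the manual `createSpan`/`try`/`catch`/`finally` bookkeeping collapses into a single `tracingClient.withSpan` call, which opens the span, records a thrown error as the span status, and ends the span whether the callback resolves or rejects. A minimal standalone sketch of the pattern, assuming `tracingClient` is built with `createTracingClient` (the namespace and package name below are illustrative stand-ins for whatever `./tracing.js` actually uses):

```ts
import { createTracingClient } from "@azure/core-tracing";
import type { OperationOptions } from "@azure-rest/core-client";

// Stand-in values for illustration; the real client lives in ./tracing.js.
const tracingClient = createTracingClient({
  namespace: "Microsoft.Search",
  packageName: "@azure/search-documents",
});

async function fetchNames(options: OperationOptions = {}): Promise<string[]> {
  // withSpan replaces the old createSpan/try/catch/finally block: it starts
  // the span, marks it failed if the callback throws, and always ends it.
  return tracingClient.withSpan("Example-fetchNames", options, async (updatedOptions) => {
    // updatedOptions carries the tracing context, so nested client calls
    // are parented under this span.
    return listSomething(updatedOptions);
  });
}

// Hypothetical inner call standing in for this.client.getIndexers and friends.
async function listSomething(_options: OperationOptions): Promise<string[]> {
  return ["indexer-1", "indexer-2"];
}
```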
-import type { OperationOptions } from "@azure/core-client"; -import type { PagedAsyncIterableIterator } from "@azure/core-paging"; +import type { OperationOptions } from "@azure-rest/core-client"; +import type { PagedAsyncIterableIterator } from "./static-helpers/pagingHelpers.js"; import type { AIFoundryModelCatalogName, - AIServices, + // AIServices, AIServicesAccountKey, AsciiFoldingTokenFilter, AzureMachineLearningSkill, @@ -13,7 +13,7 @@ import type { AzureOpenAITokenizerParameters, CognitiveServicesAccount as BaseCognitiveServicesAccount, KnowledgeBaseModel as BaseKnowledgeBaseModel, - KnowledgeSourceVectorizer as BaseKnowledgeSourceVectorizer, + // KnowledgeSourceVectorizer as BaseKnowledgeSourceVectorizer, SearchIndexerSkill as BaseSearchIndexerSkill, BinaryQuantizationCompression, BM25Similarity, @@ -48,15 +48,15 @@ import type { FieldMapping, FreshnessScoringFunction, HighWaterMarkChangeDetectionPolicy, - IndexedSharePointContainerName, + // IndexedSharePointContainerName, IndexerPermissionOption, IndexingSchedule, IndexProjectionMode, IndexStatisticsSummary, KeepTokenFilter, KeywordMarkerTokenFilter, - KnowledgeSourceContentExtractionMode, - KnowledgeSourceIngestionPermissionOption, + // KnowledgeSourceContentExtractionMode, + // KnowledgeSourceIngestionPermissionOption, KnownBlobIndexerDataToExtract, KnownBlobIndexerImageAction, KnownBlobIndexerParsingMode, @@ -101,13 +101,13 @@ import type { NativeBlobSoftDeleteDeletionDetectionPolicy, NGramTokenizer, OcrLineEnding, - PathHierarchyTokenizerV2 as PathHierarchyTokenizer, + PathHierarchyTokenizer, PatternCaptureTokenFilter, PatternReplaceCharFilter, PatternReplaceTokenFilter, PermissionFilter, PhoneticTokenFilter, - RemoteSharePointKnowledgeSourceParameters, + // RemoteSharePointKnowledgeSourceParameters, ScalarQuantizationCompression, ScoringFunctionAggregation, SearchAlias, @@ -118,7 +118,7 @@ import type { SearchIndexerKnowledgeStoreProjection, SearchIndexKnowledgeSourceParameters, SearchIndexPermissionFilterOption, - Suggester as SearchSuggester, + SearchSuggester, SemanticSearch, SentimentSkillV3, ServiceCounters, @@ -143,9 +143,14 @@ import type { VectorEncodingFormat, VectorSearchProfile, VectorSearchVectorizerKind, - WebKnowledgeSourceParameters, WordDelimiterTokenFilter, -} from "./generated/service/models/index.js"; +} from "./models/azure/search/documents/indexes/index.js"; +import type { + AIServices, + KnowledgeSourceContentExtractionMode, + KnowledgeSourceIngestionPermissionOption, + WebKnowledgeSourceParameters, +} from "./models/models.js"; import type { KnowledgeBase } from "./knowledgeBaseModels.js"; /** @@ -3272,7 +3277,7 @@ export interface IndexedSharePointKnowledgeSourceParameters { /** SharePoint connection string with format: SharePointOnlineEndpoint=[SharePoint site url];ApplicationId=[Azure AD App ID];ApplicationSecret=[Azure AD App client secret];TenantId=[SharePoint site tenant id] */ connectionString: string; /** Specifies which SharePoint libraries to access. */ - containerName: IndexedSharePointContainerName; + // containerName: IndexedSharePointContainerName; /** Optional query to filter SharePoint content. */ query?: string; /** Consolidates all general ingestion settings. */ @@ -3340,7 +3345,7 @@ export interface RemoteSharePointKnowledgeSource extends BaseKnowledgeSource { /** * The parameters for the knowledge source. 
*/ - remoteSharePointParameters: RemoteSharePointKnowledgeSourceParameters; + // remoteSharePointParameters: RemoteSharePointKnowledgeSourceParameters; } /** Consolidates all general ingestion settings for knowledge sources. */ @@ -3376,7 +3381,7 @@ export interface KnowledgeBaseAzureOpenAIModel extends BaseKnowledgeBaseModel { export type KnowledgeSourceVectorizer = KnowledgeSourceAzureOpenAIVectorizer; /** Specifies the Azure OpenAI resource used to vectorize a query string. */ -export interface KnowledgeSourceAzureOpenAIVectorizer extends BaseKnowledgeSourceVectorizer { +export interface KnowledgeSourceAzureOpenAIVectorizer /* extends BaseKnowledgeSourceVectorizer */ { /** Polymorphic discriminator, which specifies the different types this object can be */ kind: "azureOpenAI"; /** Contains the parameters specific to Azure OpenAI embedding vectorization. */ diff --git a/sdk/search/search-documents/src/serviceUtils.ts b/sdk/search/search-documents/src/serviceUtils.ts index e3fe27ef48d7..f49a37d59bbc 100644 --- a/sdk/search/search-documents/src/serviceUtils.ts +++ b/sdk/search/search-documents/src/serviceUtils.ts @@ -4,9 +4,8 @@ import type { SearchResult as GeneratedSearchResult, SuggestDocumentsResult as GeneratedSuggestDocumentsResult, -} from "./generated/data/models/index.js"; +} from "./models/azure/search/documents/index.js"; import type { - CustomAnalyzer as BaseCustomAnalyzer, SearchIndexerKnowledgeStore as BaseSearchIndexerKnowledgeStore, BM25Similarity, ClassicSimilarity, @@ -16,31 +15,31 @@ import type { AIServicesAccountIdentity as GeneratedAIServicesAccountIdentity, AIServicesAccountKey as GeneratedAIServicesAccountKey, AIServicesVisionVectorizer as GeneratedAIServicesVisionVectorizer, - AMLParameters as GeneratedAMLParameters, - AMLVectorizer as GeneratedAMLVectorizer, + AzureMachineLearningParameters as GeneratedAMLParameters, + AzureMachineLearningVectorizer as GeneratedAMLVectorizer, AzureBlobKnowledgeSource as GeneratedAzureBlobKnowledgeSource, AzureBlobKnowledgeSourceParameters as GeneratedAzureBlobKnowledgeSourceParameters, - AzureOpenAIParameters as GeneratedAzureOpenAIParameters, + AzureOpenAiParameters as GeneratedAzureOpenAIParameters, AzureOpenAIVectorizer as GeneratedAzureOpenAIVectorizer, CognitiveServicesAccountKey as GeneratedCognitiveServicesAccountKey, DefaultCognitiveServicesAccount as GeneratedDefaultCognitiveServicesAccount, ExhaustiveKnnAlgorithmConfiguration as GeneratedExhaustiveKnnAlgorithmConfiguration, HnswAlgorithmConfiguration as GeneratedHnswAlgorithmConfiguration, - IndexedOneLakeKnowledgeSource as GeneratedIndexedOneLakeKnowledgeSource, - IndexedSharePointKnowledgeSource as GeneratedIndexedSharePointKnowledgeSource, + // IndexedOneLakeKnowledgeSource as GeneratedIndexedOneLakeKnowledgeSource, + // IndexedSharePointKnowledgeSource as GeneratedIndexedSharePointKnowledgeSource, KnowledgeBase as GeneratedKnowledgeBase, KnowledgeBaseAzureOpenAIModel as GeneratedKnowledgeBaseAzureOpenAIModel, KnowledgeBaseModelUnion as GeneratedKnowledgeBaseModel, KnowledgeSourceUnion as GeneratedKnowledgeSource, - KnowledgeSourceIngestionParameters as GeneratedKnowledgeSourceIngestionParameters, - KnowledgeSourceVectorizer as GeneratedKnowledgeSourceVectorizer, + // KnowledgeSourceIngestionParameters as GeneratedKnowledgeSourceIngestionParameters, + // KnowledgeSourceVectorizer as GeneratedKnowledgeSourceVectorizer, PatternAnalyzer as GeneratedPatternAnalyzer, - RemoteSharePointKnowledgeSource as GeneratedRemoteSharePointKnowledgeSource, + // 
+  // RemoteSharePointKnowledgeSource as GeneratedRemoteSharePointKnowledgeSource,
   SearchField as GeneratedSearchField,
   SearchIndex as GeneratedSearchIndex,
   SearchIndexer as GeneratedSearchIndexer,
   SearchIndexerCache as GeneratedSearchIndexerCache,
-  SearchIndexerDataSource as GeneratedSearchIndexerDataSourceConnection,
+  // SearchIndexerDataSource as GeneratedSearchIndexerDataSourceConnection,
   SearchIndexerSkillset as GeneratedSearchIndexerSkillset,
   SearchIndexKnowledgeSource as GeneratedSearchIndexKnowledgeSource,
   SearchResourceEncryptionKey as GeneratedSearchResourceEncryptionKey,
@@ -49,7 +48,7 @@ import type {
   VectorSearchAlgorithmConfigurationUnion as GeneratedVectorSearchAlgorithmConfiguration,
   VectorSearchVectorizerUnion as GeneratedVectorSearchVectorizer,
   WebApiVectorizer as GeneratedWebApiVectorizer,
-  WebKnowledgeSource as GeneratedWebKnowledgeSource,
+  // WebKnowledgeSource as GeneratedWebKnowledgeSource,
   HighWaterMarkChangeDetectionPolicy,
   LexicalAnalyzerUnion,
   LexicalTokenizerUnion,
@@ -59,12 +58,12 @@ import type {
   SearchIndexerDataNoneIdentity,
   SearchIndexerDataUserAssignedIdentity,
   SearchIndexerSkillUnion,
-  SimilarityUnion,
+  SimilarityAlgorithmUnion as SimilarityUnion,
   SoftDeleteColumnDeletionDetectionPolicy,
   SqlIntegratedChangeTrackingPolicy,
   StopAnalyzer,
   TokenFilterUnion,
-} from "./generated/service/models/index.js";
+} from "./models/azure/search/documents/indexes/index.js";
 import type {
   SearchResult,
   SelectFields,
@@ -128,6 +127,9 @@ import type {
   WebApiVectorizer,
 } from "./serviceModels.js";
 import { isComplexField } from "./serviceModels.js";
+import { SearchIndexerDataSourceConnection as GeneratedSearchIndexerDataSourceConnection } from "./models/azure/search/documents/indexes/index.js";
+import { PagedAsyncIterableIterator } from "./static-helpers/pagingHelpers.js";
+import { KnowledgeSourceIngestionParameters as GeneratedKnowledgeSourceIngestionParameters } from "./models/models.js";
 
 export const defaultServiceVersion = "2025-11-01-Preview";
 
@@ -267,7 +269,7 @@ function convertAnalyzersToGenerated(
       case "#Microsoft.Azure.Search.CustomAnalyzer":
         result.push({
           ...analyzer,
-          tokenizerName: analyzer.tokenizerName,
+          tokenizer: analyzer.tokenizerName,
         });
         break;
     }
@@ -300,7 +302,7 @@ function convertAnalyzersToPublic(
         } as PatternAnalyzer);
         break;
       case "#Microsoft.Azure.Search.CustomAnalyzer":
-        result.push(analyzer as BaseCustomAnalyzer);
+        result.push(analyzer as LexicalAnalyzer);
         break;
     }
   }
@@ -322,20 +324,15 @@ export function convertFieldsToPublic(fields: GeneratedSearchField[]): SearchFie
       return result;
     } else {
       const type: SearchFieldDataType = field.type as SearchFieldDataType;
-      const synonymMapNames: string[] | undefined = field.synonymMaps;
+      const synonymMapNames: string[] | undefined = field.synonymMapNames;
 
-      const { retrievable, analyzer, searchAnalyzer, indexAnalyzer, normalizer, ...restField } =
-        field;
+      const { retrievable, ...restField } = field;
       const hidden = typeof retrievable === "boolean" ? !retrievable : retrievable;
       const result: SimpleField = {
         ...restField,
         type,
         hidden,
-        analyzerName: analyzer,
-        searchAnalyzerName: searchAnalyzer,
-        indexAnalyzerName: indexAnalyzer,
-        normalizerName: normalizer,
         synonymMapNames,
       };
       return result;
@@ -410,9 +407,9 @@ function convertTokenizersToPublic(
       result.push({
         ...tokenizer,
         flags,
-      });
+      } as LexicalTokenizer);
     } else {
-      result.push(tokenizer);
+      result.push(tokenizer as LexicalTokenizer);
     }
   }
   return result;
@@ -512,7 +509,7 @@ export function generatedIndexToPublicIndex(generatedIndex: GeneratedSearchIndex
     fields: convertFieldsToPublic(fields),
     similarity: convertSimilarityToPublic(similarity),
     vectorSearch: generatedVectorSearchToPublicVectorSearch(vectorSearch),
-  };
+  } as SearchIndex;
 }
 
 export function generatedVectorSearchVectorizerToPublicVectorizer(): undefined;
@@ -541,18 +538,18 @@ export function generatedVectorSearchVectorizerToPublicVectorizer(
     },
 
     customWebApi: () => {
-      const { parameters } = generatedVectorizer as GeneratedWebApiVectorizer;
-      const authIdentity = convertSearchIndexerDataIdentityToPublic(parameters?.authIdentity);
+      const { webApiParameters } = generatedVectorizer as GeneratedWebApiVectorizer;
+      const authIdentity = convertSearchIndexerDataIdentityToPublic(webApiParameters?.authIdentity);
       const vectorizer: WebApiVectorizer = {
         ...(generatedVectorizer as GeneratedWebApiVectorizer),
-        parameters: { ...parameters, authIdentity },
+        parameters: { ...webApiParameters, authIdentity },
       };
       return vectorizer;
     },
 
     aiServicesVision: () => {
       const generatedVisionVectorizer = generatedVectorizer as GeneratedAIServicesVisionVectorizer;
-      const { aIServicesVisionParameters: generatedParameters } = generatedVisionVectorizer;
+      const { aiServicesVisionParameters: generatedParameters } = generatedVisionVectorizer;
       const parameters = generatedParameters
         ? {
             ...generatedParameters,
@@ -576,7 +573,7 @@ export function generatedVectorSearchVectorizerToPublicVectorizer(
           ...generatedAMLVectorizer,
           amlParameters:
             generatedAzureMachineLearningVectorizerParametersToPublicAzureMachineLearningVectorizerParameters(
-              generatedAMLVectorizer.aMLParameters,
+              generatedAMLVectorizer.amlParameters,
             ),
         };
 
@@ -588,15 +585,20 @@ export function generatedVectorSearchVectorizerToPublicVectorizer(
     return generatedVectorizer as any;
   };
 
-  return (knownVectorizerDeserializers[generatedVectorizer.kind] ?? defaultDeserializer)();
+  return (
+    knownVectorizerDeserializers[
+      generatedVectorizer.kind as keyof typeof knownVectorizerDeserializers
+    ] ?? defaultDeserializer
+  )();
 }
 
 export function generatedKnowledgeSourceVectorizerToPublicVectorizer(): undefined;
 export function generatedKnowledgeSourceVectorizerToPublicVectorizer(
-  generatedVectorizer: GeneratedKnowledgeSourceVectorizer,
+  generatedVectorizer: any,
 ): KnowledgeSourceVectorizer;
 export function generatedKnowledgeSourceVectorizerToPublicVectorizer(
-  generatedVectorizer?: GeneratedKnowledgeSourceVectorizer,
+  // eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types
+  generatedVectorizer?: /* GeneratedKnowledgeSourceVectorizer */ any,
): KnowledgeSourceVectorizer | undefined {
   if (!generatedVectorizer) {
     return generatedVectorizer;
@@ -621,7 +623,11 @@ export function generatedKnowledgeSourceVectorizerToPublicVectorizer(
     return generatedVectorizer as any;
   };
 
-  return (knownVectorizerDeserializers[generatedVectorizer.kind] ?? defaultDeserializer)();
+  return (
+    knownVectorizerDeserializers[
+      generatedVectorizer.kind as keyof typeof knownVectorizerDeserializers
+    ] ?? defaultDeserializer
+  )();
 }
 
 function generatedAzureMachineLearningVectorizerParametersToPublicAzureMachineLearningVectorizerParameters(
@@ -695,7 +701,7 @@ export function generatedVectorSearchToPublicVectorSearch(
       generatedVectorSearchAlgorithmConfigurationToPublicVectorSearchAlgorithmConfiguration,
     ),
     vectorizers: vectorSearch.vectorizers?.map(generatedVectorSearchVectorizerToPublicVectorizer),
-  };
+  } as VectorSearch;
 }
 
 export function generatedSearchResultToPublicSearchResult<
@@ -705,12 +711,12 @@
   const returnValues: SearchResult<TModel, TFields>[] = results.map<SearchResult<TModel, TFields>>(
     (result) => {
       const {
-        _score: score,
-        _highlights: highlights,
-        _rerankerScore: rerankerScore,
-        _rerankerBoostedScore: rerankerBoostedScore,
-        _captions: captions,
-        _documentDebugInfo: documentDebugInfo,
+        score,
+        highlights,
+        rerankerScore,
+        rerankerBoostedScore,
+        captions,
+        documentDebugInfo,
         ...restProps
       } = result;
       const obj = {
@@ -733,10 +739,10 @@ export function generatedSuggestDocumentsResultToPublicSuggestDocumentsResult<
   TFields extends SelectFields<TModel>,
 >(searchDocumentsResult: GeneratedSuggestDocumentsResult): SuggestDocumentsResult<TModel, TFields> {
   const results = searchDocumentsResult.results.map<SuggestResult<TModel, TFields>>((element) => {
-    const { _text, ...restProps } = element;
+    const { text, ...restProps } = element;
 
     const obj = {
-      text: _text,
+      text,
       document: restProps,
     };
@@ -802,7 +808,7 @@ export function generatedSynonymMapToPublicSynonymMap(synonymMap: GeneratedSynon
   const result: SynonymMap = {
     name: synonymMap.name,
     encryptionKey: convertEncryptionKeyToPublic(synonymMap.encryptionKey),
-    etag: synonymMap.etag,
+    etag: synonymMap.eTag,
     synonyms: [],
   };
 
@@ -818,7 +824,7 @@ export function publicSynonymMapToGeneratedSynonymMap(synonymMap: SynonymMap): G
     name: synonymMap.name,
     format: "solr",
     encryptionKey: convertEncryptionKeyToGenerated(synonymMap.encryptionKey),
-    etag: synonymMap.etag,
+    eTag: synonymMap.etag,
     synonyms: synonymMap.synonyms.join("\n"),
   };
 
@@ -883,7 +889,7 @@ export function publicDataSourceToGeneratedDataSource(
     },
     container: dataSource.container,
     identity: dataSource.identity,
-    etag: dataSource.etag,
+    eTag: dataSource.etag,
     dataChangeDetectionPolicy: dataSource.dataChangeDetectionPolicy,
     dataDeletionDetectionPolicy: dataSource.dataDeletionDetectionPolicy,
     encryptionKey: convertEncryptionKeyToGenerated(dataSource.encryptionKey),
@@ -900,7 +906,7 @@ export function generatedDataSourceToPublicDataSource(
     connectionString: dataSource.credentials.connectionString,
     container: dataSource.container,
     identity: convertSearchIndexerDataIdentityToPublic(dataSource.identity),
-    etag: dataSource.etag,
+    etag: dataSource.eTag,
     dataChangeDetectionPolicy: convertDataChangeDetectionPolicyToPublic(
       dataSource.dataChangeDetectionPolicy,
     ),
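+
+// NOTE: the regenerated models expose the concurrency token as camelCase
+// `eTag`, while the public model keeps the historical `etag` name, so the
+// converters above rename the property in each direction. A minimal sketch of
+// the mapping (property values are illustrative):
+//
+//   const generated = { eTag: 'W/"0x8DC123"' };
+//   const publicModel = { etag: generated.eTag }; // generated -> public
+//   const backToGenerated = { eTag: publicModel.etag }; // public -> generated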
@@ -996,7 +1002,7 @@ export function convertKnowledgeBaseToPublic(knowledgeBase: GeneratedKnowledgeBa
   return {
     ...knowledgeBase,
-    models: knowledgeBase.models.map((model) => convertKnowledgeBaseModelToPublic(model)),
+    models: knowledgeBase.models!.map((model) => convertKnowledgeBaseModelToPublic(model)),
     encryptionKey: convertEncryptionKeyToPublic(knowledgeBase.encryptionKey),
   };
 }
@@ -1015,8 +1021,8 @@ export function convertKnowledgeBaseToGenerated(
 }
 
 export function convertKnowledgeSourceToPublic(
-  knowledgeSource: GeneratedKnowledgeSource | undefined,
-): KnowledgeSource | undefined {
+  knowledgeSource: GeneratedKnowledgeSource,
+): KnowledgeSource {
   if (!knowledgeSource) {
     return knowledgeSource;
   }
@@ -1024,22 +1030,28 @@ export function convertKnowledgeSourceToPublic(
   switch (knowledgeSource.kind) {
     case "searchIndex": {
       const { encryptionKey } = knowledgeSource as GeneratedSearchIndexKnowledgeSource;
-      return { ...knowledgeSource, encryptionKey: convertEncryptionKeyToPublic(encryptionKey) };
+      return {
+        ...knowledgeSource,
+        encryptionKey: convertEncryptionKeyToPublic(encryptionKey),
+      } as KnowledgeSource;
     }
     case "azureBlob": {
       const { encryptionKey, azureBlobParameters } =
         knowledgeSource as GeneratedAzureBlobKnowledgeSource;
       return {
         ...knowledgeSource,
+        kind: "azureBlob",
         encryptionKey: convertEncryptionKeyToPublic(encryptionKey),
-        azureBlobParameters: convertAzureBlobKnowledgeSourceParametersToPublic(azureBlobParameters),
+        azureBlobParameters:
+          convertAzureBlobKnowledgeSourceParametersToPublic(azureBlobParameters)!,
       };
     }
     case "indexedSharePoint": {
       const { encryptionKey, indexedSharePointParameters } =
-        knowledgeSource as GeneratedIndexedSharePointKnowledgeSource;
+        knowledgeSource as any; /* GeneratedIndexedSharePointKnowledgeSource */
       return {
         ...knowledgeSource,
+        kind: "indexedSharePoint",
         encryptionKey: convertEncryptionKeyToPublic(encryptionKey),
         indexedSharePointParameters: {
           ...indexedSharePointParameters,
@@ -1051,9 +1063,10 @@
     }
     case "indexedOneLake": {
       const { encryptionKey, indexedOneLakeParameters } =
-        knowledgeSource as GeneratedIndexedOneLakeKnowledgeSource;
+        knowledgeSource as any; /* GeneratedIndexedOneLakeKnowledgeSource */
       return {
         ...knowledgeSource,
+        kind: "indexedOneLake",
         encryptionKey: convertEncryptionKeyToPublic(encryptionKey),
         indexedOneLakeParameters: {
           ...indexedOneLakeParameters,
@@ -1064,15 +1077,24 @@
       };
     }
     case "remoteSharePoint": {
-      const { encryptionKey } = knowledgeSource as GeneratedRemoteSharePointKnowledgeSource;
+      const { encryptionKey } =
+        knowledgeSource as any; /* GeneratedRemoteSharePointKnowledgeSource */
       return {
         ...knowledgeSource,
+        kind: "remoteSharePoint",
         encryptionKey: convertEncryptionKeyToPublic(encryptionKey),
       };
     }
     case "web": {
-      const { encryptionKey } = knowledgeSource as GeneratedWebKnowledgeSource;
-      return { ...knowledgeSource, encryptionKey: convertEncryptionKeyToPublic(encryptionKey) };
+      const { encryptionKey } = knowledgeSource as any; /* GeneratedWebKnowledgeSource */
+      return {
+        ...knowledgeSource,
+        kind: "web",
+        encryptionKey: convertEncryptionKeyToPublic(encryptionKey),
+      };
+    }
+    default: {
+      throw new Error("TODO: implement the default case for knowledge source conversion");
+    }
   }
 }
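+
+// A possible hardening for the `default` branch above (a sketch, not part of
+// the current change): once the generated `KnowledgeSourceUnion` types are
+// restored, an exhaustiveness helper lets the compiler flag any kind that is
+// added to the union without a corresponding `case`:
+//
+//   function assertNever(value: never): never {
+//     throw new Error(`Unhandled knowledge source kind: ${JSON.stringify(value)}`);
+//   }
+//   // ...and in the switch: default: return assertNever(knowledgeSource);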
@@ -1093,7 +1115,9 @@
   if (!params) {
     return params;
   }
-  const { embeddingModel, chatCompletionModel, identity, ...rest } = params;
+
+  const { embeddingModel, chatCompletionModel, identity, ...rest } =
+    params as GeneratedAzureBlobKnowledgeSourceParameters; /* TODO: GeneratedKnowledgeSourceIngestionParameters does not expose these properties yet */
   return {
     ...rest,
     embeddingModel: !embeddingModel
@@ -1112,11 +1136,7 @@
   if (!params) {
     return params;
   }
-  const { ingestionParameters, ...rest } = params;
-  if (!ingestionParameters) {
-    return { ...rest };
-  }
-  const { embeddingModel, chatCompletionModel, identity } = ingestionParameters;
+  const { embeddingModel, chatCompletionModel, identity, ...rest } = params;
   return {
     ...rest,
     embeddingModel: !embeddingModel
@@ -1148,5 +1168,33 @@ function convertKnowledgeBaseModelToPublic(model: GeneratedKnowledgeBaseModel):
 function convertAzureOpenAIParametersToPublic(
   params: GeneratedAzureOpenAIParameters,
 ): AzureOpenAIParameters {
-  return { ...params, authIdentity: convertSearchIndexerDataIdentityToPublic(params.authIdentity) };
+  return {
+    ...params,
+    authIdentity: convertSearchIndexerDataIdentityToPublic(params.authIdentity as any /* TODO */),
+  };
+}
+
+export function mapPagedAsyncIterable<T, U>(
+  iter: PagedAsyncIterableIterator<T>,
+  mapper: (x: T) => U,
+): PagedAsyncIterableIterator<U> {
+  return {
+    async next() {
+      const result = await iter.next();
+
+      return {
+        ...result,
+        value: result.value && mapper(result.value),
+      };
+    },
+    [Symbol.asyncIterator]() {
+      return this;
+    },
+    async *byPage(settings) {
+      const iteratorByPage = iter.byPage(settings);
+      for await (const page of iteratorByPage) {
+        yield page.map(mapper);
+      }
+    },
+  };
+}
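+
+// Usage sketch for mapPagedAsyncIterable (the iterator source is assumed; the
+// mapper is the real converter defined earlier in this file):
+//
+//   declare const generatedIndexes: PagedAsyncIterableIterator<GeneratedSearchIndex>;
+//   const publicIndexes = mapPagedAsyncIterable(generatedIndexes, generatedIndexToPublicIndex);
+//   for await (const index of publicIndexes) {
+//     // elements (and pages from byPage()) are converted lazily, one at a time
+//   }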
diff --git a/sdk/search/search-documents/src/static-helpers/pagingHelpers.ts b/sdk/search/search-documents/src/static-helpers/pagingHelpers.ts
new file mode 100644
index 000000000000..5a3472a3f0fe
--- /dev/null
+++ b/sdk/search/search-documents/src/static-helpers/pagingHelpers.ts
@@ -0,0 +1,245 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+import { Client, createRestError, PathUncheckedResponse } from "@azure-rest/core-client";
+import { RestError } from "@azure/core-rest-pipeline";
+
+/**
+ * Options for the byPage method
+ */
+export interface PageSettings {
+  /**
+   * A reference to a specific page to start iterating from.
+   */
+  continuationToken?: string;
+}
+
+/**
+ * An interface that describes a page of results.
+ */
+export type ContinuablePage<TElement, TPage = TElement[]> = TPage & {
+  /**
+   * The token that keeps track of where to continue the iterator
+   */
+  continuationToken?: string;
+};
+
+/**
+ * An interface that allows async iterable iteration both to completion and by page.
+ */
+export interface PagedAsyncIterableIterator<
+  TElement,
+  TPage = TElement[],
+  TPageSettings extends PageSettings = PageSettings,
+> {
+  /**
+   * The next method, part of the iteration protocol
+   */
+  next(): Promise<IteratorResult<TElement>>;
+  /**
+   * The connection to the async iterator, part of the iteration protocol
+   */
+  [Symbol.asyncIterator](): PagedAsyncIterableIterator<TElement, TPage, TPageSettings>;
+  /**
+   * Return an AsyncIterableIterator that works a page at a time
+   */
+  byPage: (settings?: TPageSettings) => AsyncIterableIterator<ContinuablePage<TElement, TPage>>;
+}
+
+/**
+ * An interface that describes how to communicate with the service.
+ */
+export interface PagedResult<
+  TElement,
+  TPage = TElement[],
+  TPageSettings extends PageSettings = PageSettings,
+> {
+  /**
+   * Link to the first page of results.
+   */
+  firstPageLink?: string;
+  /**
+   * A method that returns a page of results.
+   */
+  getPage: (pageLink?: string) => Promise<{ page: TPage; nextPageLink?: string } | undefined>;
+  /**
+   * A function to implement the `byPage` method on the paged async iterator.
+   */
+  byPage?: (settings?: TPageSettings) => AsyncIterableIterator<ContinuablePage<TElement, TPage>>;
+
+  /**
+   * A function to extract elements from a page.
+   */
+  toElements?: (page: TPage) => TElement[];
+}
+
+/**
+ * Options for the paging helper
+ */
+export interface BuildPagedAsyncIteratorOptions {
+  itemName?: string;
+  nextLinkName?: string;
+  nextLinkMethod?: "GET" | "POST";
+}
+
+/**
+ * Helper to paginate results in a generic way and return a PagedAsyncIterableIterator
+ */
+export function buildPagedAsyncIterator<
+  TElement,
+  TPage = TElement[],
+  TPageSettings extends PageSettings = PageSettings,
+  TResponse extends PathUncheckedResponse = PathUncheckedResponse,
+>(
+  client: Client,
+  getInitialResponse: () => PromiseLike<TResponse>,
+  processResponseBody: (result: TResponse) => PromiseLike<unknown>,
+  expectedStatuses: string[],
+  options: BuildPagedAsyncIteratorOptions = {},
+): PagedAsyncIterableIterator<TElement, TPage, TPageSettings> {
+  const itemName = options.itemName ?? "value";
+  const nextLinkName = options.nextLinkName ?? "nextLink";
+  const nextLinkMethod = options.nextLinkMethod ?? "GET";
+  const pagedResult: PagedResult<TElement, TPage, TPageSettings> = {
+    getPage: async (pageLink?: string) => {
+      const result =
+        pageLink === undefined
+          ? await getInitialResponse()
+          : nextLinkMethod === "POST"
+            ? await client.pathUnchecked(pageLink).post()
+            : await client.pathUnchecked(pageLink).get();
+      checkPagingRequest(result, expectedStatuses);
+      const results = await processResponseBody(result as TResponse);
+      const nextLink = getNextLink(results, nextLinkName);
+      const values = getElements<TElement>(results, itemName) as TPage;
+      return {
+        page: values,
+        nextPageLink: nextLink,
+      };
+    },
+    byPage: (settings?: TPageSettings) => {
+      const { continuationToken } = settings ?? {};
+      return getPageAsyncIterator(pagedResult, {
+        pageLink: continuationToken,
+      });
+    },
+  };
+  return getPagedAsyncIterator(pagedResult);
+}
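+
+// A usage sketch, assuming a list operation whose response body is shaped like
+// `{ value: TElement[]; nextLink?: string }` (the default `itemName` and
+// `nextLinkName`); the path and element type are illustrative:
+//
+//   const iter = buildPagedAsyncIterator<MyItem>(
+//     client,
+//     () => client.pathUnchecked("/widgets").get(),
+//     async (response) => response.body,
+//     ["200"],
+//   );
+//   for await (const item of iter) {
+//     // items from all pages, fetched on demand
+//   }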
+
+/**
+ * Returns an async iterator that iterates over results. It also has a `byPage`
+ * method that returns pages of items at once.
+ *
+ * @param pagedResult - an object that specifies how to get pages.
+ * @returns a paged async iterator that iterates over results.
+ */
+function getPagedAsyncIterator<
+  TElement,
+  TPage = TElement[],
+  TPageSettings extends PageSettings = PageSettings,
+>(
+  pagedResult: PagedResult<TElement, TPage, TPageSettings>,
+): PagedAsyncIterableIterator<TElement, TPage, TPageSettings> {
+  const iter = getItemAsyncIterator<TElement, TPage, TPageSettings>(pagedResult);
+  return {
+    next() {
+      return iter.next();
+    },
+    [Symbol.asyncIterator]() {
+      return this;
+    },
+    byPage:
+      pagedResult?.byPage ??
+      ((settings?: TPageSettings) => {
+        const { continuationToken } = settings ?? {};
+        return getPageAsyncIterator(pagedResult, {
+          pageLink: continuationToken,
+        });
+      }),
+  };
+}
+
+async function* getItemAsyncIterator<TElement, TPage, TPageSettings extends PageSettings>(
+  pagedResult: PagedResult<TElement, TPage, TPageSettings>,
+): AsyncIterableIterator<TElement> {
+  const pages = getPageAsyncIterator(pagedResult);
+  for await (const page of pages) {
+    yield* page as unknown as TElement[];
+  }
+}
+
+async function* getPageAsyncIterator<TElement, TPage, TPageSettings extends PageSettings>(
+  pagedResult: PagedResult<TElement, TPage, TPageSettings>,
+  options: {
+    pageLink?: string;
+  } = {},
+): AsyncIterableIterator<ContinuablePage<TElement, TPage>> {
+  const { pageLink } = options;
+  let response = await pagedResult.getPage(pageLink ?? pagedResult.firstPageLink);
+  if (!response) {
+    return;
+  }
+  let result = response.page as ContinuablePage<TElement, TPage>;
+  result.continuationToken = response.nextPageLink;
+  yield result;
+  while (response.nextPageLink) {
+    response = await pagedResult.getPage(response.nextPageLink);
+    if (!response) {
+      return;
+    }
+    result = response.page as ContinuablePage<TElement, TPage>;
+    result.continuationToken = response.nextPageLink;
+    yield result;
+  }
+}
+
+/**
+ * Gets the value of nextLink in the body.
+ */
+function getNextLink(body: unknown, nextLinkName?: string): string | undefined {
+  if (!nextLinkName) {
+    return undefined;
+  }
+
+  const nextLink = (body as Record<string, unknown>)[nextLinkName];
+
+  if (typeof nextLink !== "string" && typeof nextLink !== "undefined" && nextLink !== null) {
+    throw new RestError(
+      `Body Property ${nextLinkName} should be a string or undefined or null but got ${typeof nextLink}`,
+    );
+  }
+
+  if (nextLink === null) {
+    return undefined;
+  }
+
+  return nextLink;
+}
+
+/**
+ * Gets the elements of the current request in the body.
+ */
+function getElements<T = unknown>(body: unknown, itemName: string): T[] {
+  const value = (body as Record<string, unknown>)[itemName] as T[];
+  if (!Array.isArray(value)) {
+    throw new RestError(
+      `Couldn't paginate response\n Body doesn't contain an array property with name: ${itemName}`,
+    );
+  }
+
+  return value ?? [];
+}
+
+/**
+ * Checks if a request failed
+ */
+function checkPagingRequest(response: PathUncheckedResponse, expectedStatuses: string[]): void {
+  if (!expectedStatuses.includes(response.status)) {
+    throw createRestError(
+      `Pagination failed with unexpected statusCode ${response.status}`,
+      response,
+    );
+  }
+}
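+
+// Page-at-a-time iteration and resumption both go through `byPage`; every page
+// carries the `continuationToken` stamped on by getPageAsyncIterator. A sketch,
+// with `iter` being any iterator produced by buildPagedAsyncIterator:
+//
+//   let token: string | undefined;
+//   for await (const page of iter.byPage()) {
+//     token = page.continuationToken; // persist this to resume later
+//     break;
+//   }
+//   for await (const page of iter.byPage({ continuationToken: token })) {
+//     // resumes from the saved page link
+//   }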
diff --git a/sdk/search/search-documents/src/static-helpers/serialization/serialize-record.ts b/sdk/search/search-documents/src/static-helpers/serialization/serialize-record.ts
new file mode 100644
index 000000000000..f2d3a221fef7
--- /dev/null
+++ b/sdk/search/search-documents/src/static-helpers/serialization/serialize-record.ts
@@ -0,0 +1,18 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+export function serializeRecord(item: any, excludes?: string[], serializer?: (item: any) => any) {
+  excludes = excludes ?? [];
+  const res: any = {};
+  for (const key of Object.keys(item)) {
+    if (excludes.includes(key) || item[key] === undefined) {
+      continue;
+    }
+    if (serializer) {
+      res[key] = serializer(item[key]);
+    } else {
+      res[key] = item[key] as any;
+    }
+  }
+  return res;
+}
diff --git a/sdk/search/search-documents/src/static-helpers/urlTemplate.ts b/sdk/search/search-documents/src/static-helpers/urlTemplate.ts
new file mode 100644
index 000000000000..c7109898692a
--- /dev/null
+++ b/sdk/search/search-documents/src/static-helpers/urlTemplate.ts
@@ -0,0 +1,227 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+// ---------------------
+// interfaces
+// ---------------------
+interface ValueOptions {
+  isFirst: boolean; // is first value in the expression
+  op?: string; // operator
+  varValue?: any; // variable value
+  varName?: string; // variable name
+  modifier?: string; // modifier, e.g. *
+  reserved?: boolean; // if true we'll keep reserved words without encoding
+}
+
+export interface UrlTemplateOptions {
+  // if set to true, reserved characters will not be encoded
+  allowReserved?: boolean;
+}
+
+// ---------------------
+// helpers
+// ---------------------
+function encodeComponent(val: string, reserved?: boolean, op?: string): string {
+  return (reserved ?? op === "+") || op === "#"
+    ? encodeReservedComponent(val)
+    : encodeRFC3986URIComponent(val);
+}
+
+function encodeReservedComponent(str: string): string {
+  return str
+    .split(/(%[0-9A-Fa-f]{2})/g)
+    .map((part) => (!/%[0-9A-Fa-f]/.test(part) ? encodeURI(part) : part))
+    .join("");
+}
+
+function encodeRFC3986URIComponent(str: string): string {
+  return encodeURIComponent(str).replace(
+    /[!'()*]/g,
+    (c) => `%${c.charCodeAt(0).toString(16).toUpperCase()}`,
+  );
+}
+
+function isDefined(val: any): boolean {
+  return val !== undefined && val !== null;
+}
+
+function getNamedAndIfEmpty(op?: string): [boolean, string] {
+  return [!!op && [";", "?", "&"].includes(op), !!op && ["?", "&"].includes(op) ? "=" : ""];
+}
+
+function getFirstOrSep(op?: string, isFirst = false): string {
+  if (isFirst) {
+    return !op || op === "+" ? "" : op;
+  } else if (!op || op === "+" || op === "#") {
+    return ",";
+  } else if (op === "?") {
+    return "&";
+  } else {
+    return op;
+  }
+}
+
+function getExpandedValue(option: ValueOptions): string {
+  let isFirst = option.isFirst;
+  const { op, varName, varValue: value, reserved } = option;
+  const vals: string[] = [];
+  const [named, ifEmpty] = getNamedAndIfEmpty(op);
+
+  if (Array.isArray(value)) {
+    for (const val of value.filter(isDefined)) {
+      // prepare the following parts: separator, varName, value
+      vals.push(`${getFirstOrSep(op, isFirst)}`);
+      if (named && varName) {
+        vals.push(`${encodeURIComponent(varName)}`);
+        if (val === "") {
+          vals.push(ifEmpty);
+        } else {
+          vals.push("=");
+        }
+      }
+      vals.push(encodeComponent(val, reserved, op));
+      isFirst = false;
+    }
+  } else if (typeof value === "object") {
+    for (const key of Object.keys(value)) {
+      const val = value[key];
+      if (!isDefined(val)) {
+        continue;
+      }
+      // prepare the following parts: separator, key, value
+      vals.push(`${getFirstOrSep(op, isFirst)}`);
+      if (key) {
+        vals.push(`${encodeURIComponent(key)}`);
+        if (named && val === "") {
+          vals.push(ifEmpty);
+        } else {
+          vals.push("=");
+        }
+      }
+      vals.push(encodeComponent(val, reserved, op));
+      isFirst = false;
+    }
+  }
+  return vals.join("");
+}
+
+function getNonExpandedValue(option: ValueOptions): string | undefined {
+  const { op, varName, varValue: value, isFirst, reserved } = option;
+  const vals: string[] = [];
+  const first = getFirstOrSep(op, isFirst);
+  const [named, ifEmpty] = getNamedAndIfEmpty(op);
+  if (named && varName) {
+    vals.push(encodeComponent(varName, reserved, op));
+    if (value === "") {
+      if (!ifEmpty) {
+        vals.push(ifEmpty);
+      }
+      return !vals.join("") ? undefined : `${first}${vals.join("")}`;
+    }
+    vals.push("=");
+  }
+
+  const items = [];
+  if (Array.isArray(value)) {
+    for (const val of value.filter(isDefined)) {
+      items.push(encodeComponent(val, reserved, op));
+    }
+  } else if (typeof value === "object") {
+    for (const key of Object.keys(value)) {
+      if (!isDefined(value[key])) {
+        continue;
+      }
+      items.push(encodeRFC3986URIComponent(key));
+      items.push(encodeComponent(value[key], reserved, op));
+    }
+  }
+  vals.push(items.join(","));
+  return !vals.join(",") ? undefined : `${first}${vals.join("")}`;
+}
+
+function getVarValue(option: ValueOptions): string | undefined {
+  const { op, varName, modifier, isFirst, reserved, varValue: value } = option;
+
+  if (!isDefined(value)) {
+    return undefined;
+  } else if (["string", "number", "boolean"].includes(typeof value)) {
+    let val = value.toString();
+    const [named, ifEmpty] = getNamedAndIfEmpty(op);
+    const vals: string[] = [getFirstOrSep(op, isFirst)];
+    if (named && varName) {
+      // No need to encode varName considering it is already encoded
+      vals.push(varName);
+      if (val === "") {
+        vals.push(ifEmpty);
+      } else {
+        vals.push("=");
+      }
+    }
+    if (modifier && modifier !== "*") {
+      val = val.substring(0, parseInt(modifier, 10));
+    }
+    vals.push(encodeComponent(val, reserved, op));
+    return vals.join("");
+  } else if (modifier === "*") {
+    return getExpandedValue(option);
+  } else {
+    return getNonExpandedValue(option);
+  }
+}
+
+// ---------------------------------------------------------------------------------------------------
+// This is an implementation of RFC 6570 URI Template: https://datatracker.ietf.org/doc/html/rfc6570.
+// ---------------------------------------------------------------------------------------------------
+export function expandUrlTemplate(
+  template: string,
+  context: Record<string, any>,
+  option?: UrlTemplateOptions,
+): string {
+  const result = template.replace(/\{([^{}]+)\}|([^{}]+)/g, (_, expr, text) => {
+    if (!expr) {
+      return encodeReservedComponent(text);
+    }
+    let op;
+    if (["+", "#", ".", "/", ";", "?", "&"].includes(expr[0])) {
+      op = expr[0];
+      expr = expr.slice(1);
+    }
+    const varList = expr.split(/,/g);
+    const result = [];
+    for (const varSpec of varList) {
+      const varMatch = /([^:*]*)(?::(\d+)|(\*))?/.exec(varSpec);
+      if (!varMatch || !varMatch[1]) {
+        continue;
+      }
+      const varValue = getVarValue({
+        isFirst: result.length === 0,
+        op,
+        varValue: context[varMatch[1]],
+        varName: varMatch[1],
+        modifier: varMatch[2] || varMatch[3],
+        reserved: option?.allowReserved,
+      });
+      if (varValue) {
+        result.push(varValue);
+      }
+    }
+    return result.join("");
+  });
+
+  return normalizeUnreserved(result);
+}
+
+/**
+ * Normalize an expanded URI by decoding percent-encoded unreserved characters.
+ * RFC 3986 unreserved: "-" / "." / "~"
+ */
+function normalizeUnreserved(uri: string): string {
+  return uri.replace(/%([0-9A-Fa-f]{2})/g, (match, hex) => {
+    const char = String.fromCharCode(parseInt(hex, 16));
+    // Decode only if it's unreserved
+    if (/[\-.~]/.test(char)) {
+      return char;
+    }
+    return match; // leave other encodings intact
+  });
+}
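+
+// Example expansions (sketches; the templates and values are illustrative):
+//
+//   expandUrlTemplate("/indexes('{indexName}')", { indexName: "hotels" });
+//   // => "/indexes('hotels')"
+//
+//   expandUrlTemplate("/docs{?search,api-version}", {
+//     search: "wifi",
+//     "api-version": "2025-11-01-Preview",
+//   });
+//   // => "/docs?search=wifi&api-version=2025-11-01-Preview"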
* @internal */ -const tracingClient = createTracingClient({ +export const tracingClient = createTracingClient({ namespace: "Microsoft.Search", packageName: "Azure.Search", }); diff --git a/sdk/search/search-documents/tsconfig.json b/sdk/search/search-documents/tsconfig.json index d466f1460665..7e2de3350abe 100644 --- a/sdk/search/search-documents/tsconfig.json +++ b/sdk/search/search-documents/tsconfig.json @@ -2,16 +2,6 @@ "references": [ { "path": "./tsconfig.src.json" - }, - { - "path": "./tsconfig.samples.json" - }, - { - "path": "./tsconfig.test.json" - }, - { - "path": "./tsconfig.snippets.json" } - ], - "files": [] + ] } diff --git a/sdk/search/search-documents/tsp-location.yaml b/sdk/search/search-documents/tsp-location.yaml new file mode 100644 index 000000000000..0c1af3a8cdc4 --- /dev/null +++ b/sdk/search/search-documents/tsp-location.yaml @@ -0,0 +1,3 @@ +directory: specification/search/Azure.Search +commit: 5656333510e6c6b8c4027c73933349f1f16a0fe6 +repo: Azure/azure-rest-api-specs