From e15ec8ef22bad29bee2d9ada09b5d0c4e505dcb1 Mon Sep 17 00:00:00 2001
From: Maor Leger
Date: Tue, 30 Sep 2025 15:21:41 +0000
Subject: [PATCH 01/21] initial restore

---
 .../search-documents/src/TROUBLESHOOTING.md | 16 +-
 .../search-documents/src/base64-browser.mts | 2 +-
 sdk/search/search-documents/src/base64.ts | 2 +-
 .../search-documents/src/errorModels.ts | 30 +-
 .../src/generated/data/index.ts | 6 +-
 .../src/generated/data/models/index.ts | 652 +---------
 .../src/generated/data/models/mappers.ts | 511 --------
 .../src/generated/data/models/parameters.ts | 58 +-
 .../generated/data/operations/documents.ts | 15 +-
 .../src/generated/data/operations/index.ts | 2 +-
 .../data/operationsInterfaces/documents.ts | 2 +-
 .../data/operationsInterfaces/index.ts | 2 +-
 .../src/generated/data/searchClient.ts | 15 +-
 .../src/generated/service/index.ts | 6 +-
 .../src/generated/service/models/index.ts | 1011 +++------------
 .../src/generated/service/models/mappers.ts | 1134 +++-------------
 .../generated/service/models/parameters.ts | 63 +-
 .../generated/service/operations/aliases.ts | 208 ---
 .../service/operations/dataSources.ts | 15 +-
 .../src/generated/service/operations/index.ts | 11 +-
 .../generated/service/operations/indexers.ts | 48 +-
 .../generated/service/operations/indexes.ts | 10 +-
 .../generated/service/operations/skillsets.ts | 51 +-
 .../service/operations/synonymMaps.ts | 10 +-
 .../service/operationsInterfaces/aliases.ts | 68 -
 .../operationsInterfaces/dataSources.ts | 2 +-
 .../service/operationsInterfaces/index.ts | 11 +-
 .../service/operationsInterfaces/indexers.ts | 12 +-
 .../service/operationsInterfaces/indexes.ts | 2 +-
 .../service/operationsInterfaces/skillsets.ts | 15 +-
 .../operationsInterfaces/synonymMaps.ts | 2 +-
 .../generated/service/searchServiceClient.ts | 22 +-
 .../search-documents/src/geographyPoint.ts | 2 +-
 sdk/search/search-documents/src/index.ts | 122 +-
 .../src/indexDocumentsBatch.ts | 8 +-
 .../search-documents/src/indexModels.ts | 138 +-
 sdk/search/search-documents/src/logger.ts | 2 +-
 sdk/search/search-documents/src/odata.ts | 8 +-
 .../src/odataMetadataPolicy.ts | 4 +-
 .../src/searchApiKeyCredentialPolicy.ts | 6 +-
 .../search-documents/src/searchAudience.ts | 2 +-
 .../search-documents/src/searchClient.ts | 202 +--
 .../search-documents/src/searchIndexClient.ts | 212 +--
 .../src/searchIndexerClient.ts | 104 +-
 .../src/searchIndexingBufferedSender.ts | 20 +-
 .../search-documents/src/serialization.ts | 6 +-
 .../search-documents/src/serviceModels.ts | 401 +----
 .../search-documents/src/serviceUtils.ts | 265 +---
 .../src/synonymMapHelper-browser.mts | 4 +-
 .../search-documents/src/synonymMapHelper.ts | 8 +-
 sdk/search/search-documents/src/tracing.ts | 2 +-
 sdk/search/search-documents/src/walk.ts | 2 +-
 sdk/search/search-documents/test/README.md | 8 +-
 .../test/internal/base64.spec.ts | 11 +-
 .../browser/synonymMap.browser.spec.ts | 11 +-
 .../test/internal/geographyPoint.spec.ts | 15 +-
 .../internal/node/synonymMap.node.spec.ts | 9 +-
 .../test/internal/serialization.spec.ts | 63 +-
 .../test/internal/serviceUtils.spec.ts | 41 +-
 .../search-documents/test/narrowedTypes.ts | 18 +-
 .../test/public/node/searchClient.spec.ts | 253 ++--
 .../public/node/searchIndexClient.spec.ts | 64 +-
 .../test/public/odata.spec.ts | 12 +-
 .../test/public/typeDefinitions.ts | 18 +-
 .../test/public/utils/interfaces.ts | 4 +-
 .../test/public/utils/recordedClient.ts | 14 +-
 .../test/public/utils/setup.ts | 55 +-
 .../search-documents/test/snippets.spec.ts | 380 ------
 68 files changed, 1020 insertions(+), 5488 deletions(-)
 delete mode 100644 sdk/search/search-documents/src/generated/service/operations/aliases.ts
 delete mode 100644 sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts
 delete mode 100644 sdk/search/search-documents/test/snippets.spec.ts

diff --git a/sdk/search/search-documents/src/TROUBLESHOOTING.md b/sdk/search/search-documents/src/TROUBLESHOOTING.md index efa5656e2bb0..679b79eaba26 100644 --- a/sdk/search/search-documents/src/TROUBLESHOOTING.md +++ b/sdk/search/search-documents/src/TROUBLESHOOTING.md @@ -1,7 +1,7 @@ # Troubleshooting Azure Cognitive Search SDK Issues The `azure-search-documents` package provides APIs for operations on the -[Azure Cognitive Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) cloud service. +[Azure Cognitive Search](https://docs.microsoft.com/azure/search/search-what-is-azure-search) cloud service. ## Table of Contents @@ -13,18 +13,18 @@ The `azure-search-documents` package provides APIs for operations on the ## Troubleshooting Issues By Response Code -See [this page](https://learn.microsoft.com/rest/api/searchservice/http-status-codes) for the common response status codes sent by the Azure Cognitive Search service. +See [this page](https://docs.microsoft.com/rest/api/searchservice/http-status-codes) for the common response status codes sent by the Azure Cognitive Search service. ### 207 Multi-Status -This response status indicates a partial success for an indexing operation. Some documents were successfully processed, but at least one failed. Details of the failed documents are present in the individual `IndexingResult` objects within the `IndexDocumentsResult`. If you want the [`indexDocuments`](https://learn.microsoft.com/javascript/api/@azure/search-documents/searchclient?view=azure-node-latest#@azure-search-documents-searchclient-indexdocuments) method call to throw an exception on any failure, set [`IndexDocumentsOptions.throwOnAnyFailure`](https://learn.microsoft.com/javascript/api/@azure/search-documents/indexdocumentsoptions?view=azure-node-latest#@azure-search-documents-indexdocumentsoptions-throwonanyfailure) +This response status indicates a partial success for an indexing operation. Some documents were successfully processed, but at least one failed. Details of the failed documents are present in the individual `IndexingResult` objects within the `IndexDocumentsResult`. If you want the [`indexDocuments`](https://docs.microsoft.com/javascript/api/@azure/search-documents/searchclient?view=azure-node-latest#@azure-search-documents-searchclient-indexdocuments) method call to throw an exception on any failure, set [`IndexDocumentsOptions.throwOnAnyFailure`](https://docs.microsoft.com/javascript/api/@azure/search-documents/indexdocumentsoptions?view=azure-node-latest#@azure-search-documents-indexdocumentsoptions-throwonanyfailure) to `true`. Each failure is then recorded in a separate `IndexingResult` and a single `IndexBatchException` is thrown by the method. ### 403 Forbidden Returned when you pass an invalid api-key. Search service uses two types of keys to control access: admin (read-write) and query (read-only). The **admin key** grants full rights to all operations, including the ability to manage the service, create and delete indexes, indexers, and data sources. The **query key** grants read-only access to indexes and documents. -Ensure that the key used for an API call provides sufficient privileges for the operation. 
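To make the distinction between the two key types concrete, here is a minimal sketch of constructing a read-only client and an admin client with `@azure/search-documents`; the endpoint, index name, and key values are placeholders, not values from this patch:

```typescript
import { AzureKeyCredential, SearchClient, SearchIndexClient } from "@azure/search-documents";

// Placeholder service endpoint and keys; substitute your own values.
const endpoint = "https://<my-service>.search.windows.net";

// A query (read-only) key is sufficient for searching documents in an index.
const searchClient = new SearchClient<{ hotelId: string }>(
  endpoint,
  "<my-index>",
  new AzureKeyCredential("<query-key>"),
);

// Creating, updating, or deleting indexes requires an admin (read-write) key;
// attempting those operations with a query key is a common cause of 403 Forbidden.
const indexClient = new SearchIndexClient(endpoint, new AzureKeyCredential("<admin-key>"));
```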
See [here](https://docs.microsoft.com/azure/search/search-security-api-keys) +for details about managing API keys. If you are using the `azure-identity` package to authenticate requests to Azure Cognitive Search, please see our [troubleshooting guide](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/identity/identity/TROUBLESHOOTING.md). @@ -37,7 +37,7 @@ Returned when a resource does not exist on the server. If you are managing or qu If this error occurs while you are trying to create an index, it means you already have the maximum number of indexes allowed for your pricing tier. A count of the indexes stored in Azure Cognitive Search is visible in the search service dashboard on the [Azure portal](https://portal.azure.com/). To view the indexes by name, click the Index tile. -Alternatively, you can also get a list of the indexes by name using the [listIndexesNames() method](https://learn.microsoft.com/javascript/api/@azure/search-documents/searchindexclient?view=azure-node-latest#@azure-search-documents-searchindexclient-listindexesnames). +Alternatively, you can also get a list of the indexes by name using the [listIndexesNames() method](https://docs.microsoft.com/javascript/api/@azure/search-documents/searchindexclient?view=azure-node-latest#@azure-search-documents-searchindexclient-listindexesnames). If this error occurs during document upload, it indicates that you've exceeded your quota on the number of documents per index. You must either create a new index or upgrade for higher capacity limits. @@ -45,7 +45,9 @@ If this error occurs during document upload, it indicates that you've exceeded y A common class of issues when using the Search SDK is that the result set of a search query is different from what is expected. -For such cases, you should start by running the search query in the portal to rule out any service-side issues with the search query or any parameter(s). Review the [OData syntax](https://learn.microsoft.com/azure/search/query-odata-filter-orderby-syntax), if any, used in the query. +For such cases, you should start by running the search query in the portal to rule out any service-side issues with the search query or any parameter(s). Review the [OData syntax](https://docs.microsoft.com/azure/search/query-odata-filter-orderby-syntax), if any, used in the query. -Once the result looks good in the portal, use that as the template to populate the objects and parameters in the search request APIs. You should also verify that the correct set of documents have been indexed and are being searched on the service side. One tip would be to start with a 'broad' query (one that returns a superset of desired results, possibly by giving a large value for, or entirely removing, some [query parameters](https://learn.microsoft.com/rest/api/searchservice/search-documents#query-parameters)) +Once the result looks good in the portal, use that as the template to populate the objects and parameters in the search request APIs. You should also verify that the correct set of documents have been indexed and are being searched on the service side. 
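For example, a query validated in the portal can be mirrored in the request APIs roughly as follows; the `Hotel` schema, endpoint, and key below are hypothetical stand-ins for your own index:

```typescript
import { AzureKeyCredential, SearchClient } from "@azure/search-documents";

// Hypothetical index schema used only for this sketch.
interface Hotel {
  hotelId: string;
  hotelName: string;
  rating: number;
}

const client = new SearchClient<Hotel>(
  "https://<my-service>.search.windows.net",
  "<my-index>",
  new AzureKeyCredential("<query-key>"),
);

async function runPortalQuery(): Promise<void> {
  // Start broad ("*" matches every document), then add back the parameters
  // that produced the expected results in the portal, one at a time.
  const searchResults = await client.search("*", {
    filter: "rating ge 4", // OData $filter, as validated in the portal
    orderBy: ["rating desc"], // OData $orderby
    top: 10,
    includeTotalCount: true,
  });

  console.log(`Total matches: ${searchResults.count}`);
  for await (const result of searchResults.results) {
    console.log(result.document.hotelName);
  }
}

runPortalQuery().catch(console.error);
```

Loosening or removing `filter` and `top` turns this into the 'broad' query described next.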
One tip would be to start with a 'broad' query (one that returns a superset of desired results, possibly by giving a large value for, or entirely removing, some [query parameters](https://docs.microsoft.com/rest/api/searchservice/search-documents#query-parameters)) and then progressively refine the query until it expresses the desired intent. + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-java%2Fsdk%2Fsearch%2Fazure-search-documents%2FTROUBLESHOOTING.png) diff --git a/sdk/search/search-documents/src/base64-browser.mts b/sdk/search/search-documents/src/base64-browser.mts index 06c360dc14df..204fbaf0dcee 100644 --- a/sdk/search/search-documents/src/base64-browser.mts +++ b/sdk/search/search-documents/src/base64-browser.mts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. /** * Encodes a string in base64 format. diff --git a/sdk/search/search-documents/src/base64.ts b/sdk/search/search-documents/src/base64.ts index b8a6a71d98e3..c1be83174eff 100644 --- a/sdk/search/search-documents/src/base64.ts +++ b/sdk/search/search-documents/src/base64.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. /** * Encodes a string in base64 format. diff --git a/sdk/search/search-documents/src/errorModels.ts b/sdk/search/search-documents/src/errorModels.ts index 10fadac5a09d..34723c353ef1 100644 --- a/sdk/search/search-documents/src/errorModels.ts +++ b/sdk/search/search-documents/src/errorModels.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. /** * Common error response for all Azure Resource Manager APIs to return error details for failed @@ -13,28 +13,28 @@ export interface ErrorResponse { /** The error detail. */ export interface ErrorDetail { /** - * The error code. - * NOTE: This property will not be serialized. It can only be populated by the server. + * The error code. NOTE: This property will not be serialized. It can only be populated by the + * server. */ readonly code?: string; /** - * The error message. - * NOTE: This property will not be serialized. It can only be populated by the server. + * The error message. NOTE: This property will not be serialized. It can only be populated by the + * server. */ readonly message?: string; /** - * The error target. - * NOTE: This property will not be serialized. It can only be populated by the server. + * The error target. NOTE: This property will not be serialized. It can only be populated by the + * server. */ readonly target?: string; /** - * The error details. - * NOTE: This property will not be serialized. It can only be populated by the server. + * The error details. NOTE: This property will not be serialized. It can only be populated by the + * server. */ readonly details?: ErrorDetail[]; /** - * The error additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. + * The error additional info. NOTE: This property will not be serialized. It can only be populated + * by the server. */ readonly additionalInfo?: ErrorAdditionalInfo[]; } @@ -42,13 +42,13 @@ export interface ErrorDetail { /** The resource management error additional info. */ export interface ErrorAdditionalInfo { /** - * The additional info type. 
NOTE: This property will not be serialized. It can only be populated + * by the server. */ readonly type?: string; /** - * The additional info. - * NOTE: This property will not be serialized. It can only be populated by the server. + * The additional info. NOTE: This property will not be serialized. It can only be populated by + * the server. */ readonly info?: Record; } diff --git a/sdk/search/search-documents/src/generated/data/index.ts b/sdk/search/search-documents/src/generated/data/index.ts index 2bee12aaf341..f3df3736e075 100644 --- a/sdk/search/search-documents/src/generated/data/index.ts +++ b/sdk/search/search-documents/src/generated/data/index.ts @@ -6,6 +6,6 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -export * from "./models/index.js"; -export { SearchClient } from "./searchClient.js"; -export * from "./operationsInterfaces/index.js"; +export * from "./models"; +export { SearchClient } from "./searchClient"; +export * from "./operationsInterfaces"; diff --git a/sdk/search/search-documents/src/generated/data/models/index.ts b/sdk/search/search-documents/src/generated/data/models/index.ts index 815148d18830..e7aebcf7b22d 100644 --- a/sdk/search/search-documents/src/generated/data/models/index.ts +++ b/sdk/search/search-documents/src/generated/data/models/index.ts @@ -12,13 +12,7 @@ import * as coreHttpCompat from "@azure/core-http-compat"; export type VectorQueryUnion = | VectorQuery | VectorizedQuery - | VectorizableTextQuery - | VectorizableImageUrlQuery - | VectorizableImageBinaryQuery; -export type VectorThresholdUnion = - | VectorThreshold - | VectorSimilarityThreshold - | SearchScoreThreshold; + | VectorizableTextQuery; /** Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). */ export interface ErrorResponse { @@ -116,16 +110,6 @@ export interface SearchDocumentsResult { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly semanticPartialResponseType?: SemanticSearchResultsType; - /** - * Type of query rewrite that was used to retrieve documents. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly semanticQueryRewritesResultType?: SemanticQueryRewritesResultType; - /** - * Debug information that applies to the search results as a whole. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly debugInfo?: DebugInfo; } /** A single bucket of a facet query result. Reports the number of documents with a field value falling within a particular range or having a particular value or interval. */ @@ -137,11 +121,6 @@ export interface FacetResult { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly count?: number; - /** - * The nested facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not contain any nested facets. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly facets?: { [propertyName: string]: FacetResult[] }; } /** An answer is a text passage extracted from the contents of the most relevant documents that matched the query. Answers are extracted from the top search results. Answer candidates are scored and the top answers are selected. 
*/ @@ -198,18 +177,12 @@ export interface SearchRequest { scoringParameters?: string[]; /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ scoringProfile?: string; - /** Enables a debugging tool that can be used to further explore your reranked results. */ - debug?: QueryDebugMode; /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */ searchText?: string; /** The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ searchFields?: string; /** A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. */ searchMode?: SearchMode; - /** A value that specifies the language of the search query. */ - queryLanguage?: QueryLanguage; - /** A value that specified the type of the speller to use to spell-correct individual search query terms. */ - speller?: QuerySpellerType; /** The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. */ select?: string; /** The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead. */ @@ -228,22 +201,16 @@ export interface SearchRequest { answers?: QueryAnswerType; /** A value that specifies whether captions should be returned as part of the search response. */ captions?: QueryCaptionType; - /** A value that specifies whether query rewrites should be generated to augment the search query. */ - queryRewrites?: QueryRewritesType; - /** The comma-separated list of field names used for semantic ranking. */ - semanticFields?: string; /** The query parameters for vector and hybrid search queries. */ vectorQueries?: VectorQueryUnion[]; /** Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter' for new indexes. */ vectorFilterMode?: VectorFilterMode; - /** The query parameters to configure hybrid search behaviors. */ - hybridSearch?: HybridSearch; } /** The query parameters for vector and hybrid search queries. */ export interface VectorQuery { /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "vector" | "text" | "imageUrl" | "imageBinary"; + kind: "vector" | "text"; /** Number of nearest neighbors to return as top hits. */ kNearestNeighborsCount?: number; /** Vector Fields of type Collection(Edm.Single) to be included in the vector searched. */ @@ -254,24 +221,6 @@ export interface VectorQuery { oversampling?: number; /** Relative weight of the vector query when compared to other vector query and/or the text query within the same search request. This value is used when combining the results of multiple ranking lists produced by the different vector queries and/or the results retrieved through the text query. The higher the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. */ weight?: number; - /** The threshold used for vector queries. Note this can only be set if all 'fields' use the same similarity metric. 
*/ - threshold?: VectorThresholdUnion; - /** The OData filter expression to apply to this specific vector query. If no filter expression is defined at the vector level, the expression defined in the top level filter parameter is used instead. */ - filterOverride?: string; -} - -/** The threshold used for vector queries. */ -export interface VectorThreshold { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "vectorSimilarity" | "searchScore"; -} - -/** TThe query parameters to configure hybrid search behaviors. */ -export interface HybridSearch { - /** Determines the maximum number of documents to be retrieved by the text query portion of a hybrid search request. Those documents will be combined with the documents matching the vector queries to produce a single final list of results. Choosing a larger maxTextRecallSize value will allow retrieving and paging through more documents (using the top and skip parameters), at the cost of higher resource utilization and higher latency. The value needs to be between 1 and 10,000. Default is 1000. */ - maxTextRecallSize?: number; - /** Determines whether the count and facets should includes all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. */ - countAndFacetMode?: HybridCountAndFacetMode; } /** Contains a document found by a search query, plus associated metadata. */ @@ -298,11 +247,6 @@ export interface SearchResult { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly _captions?: QueryCaptionResult[]; - /** - * Contains debugging information that can be used to further explore your search results. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly documentDebugInfo?: DocumentDebugInfo; } /** Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type `semantic`. */ @@ -321,163 +265,6 @@ export interface QueryCaptionResult { readonly highlights?: string; } -/** Contains debugging information that can be used to further explore your search results. */ -export interface DocumentDebugInfo { - /** - * Contains debugging information specific to semantic ranking requests. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly semantic?: SemanticDebugInfo; - /** - * Contains debugging information specific to vector and hybrid search. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly vectors?: VectorsDebugInfo; -} - -export interface SemanticDebugInfo { - /** - * The title field that was sent to the semantic enrichment process, as well as how it was used - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly titleField?: QueryResultDocumentSemanticField; - /** - * The content fields that were sent to the semantic enrichment process, as well as how they were used - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly contentFields?: QueryResultDocumentSemanticField[]; - /** - * The keyword fields that were sent to the semantic enrichment process, as well as how they were used - * NOTE: This property will not be serialized. It can only be populated by the server. 
- */ - readonly keywordFields?: QueryResultDocumentSemanticField[]; - /** - * The raw concatenated strings that were sent to the semantic enrichment process. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly rerankerInput?: QueryResultDocumentRerankerInput; -} - -/** Description of fields that were sent to the semantic enrichment process, as well as how they were used */ -export interface QueryResultDocumentSemanticField { - /** - * The name of the field that was sent to the semantic enrichment process - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly name?: string; - /** - * The way the field was used for the semantic enrichment process (fully used, partially used, or unused) - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly state?: SemanticFieldState; -} - -/** The raw concatenated strings that were sent to the semantic enrichment process. */ -export interface QueryResultDocumentRerankerInput { - /** - * The raw string for the title field that was used for semantic enrichment. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly title?: string; - /** - * The raw concatenated strings for the content fields that were used for semantic enrichment. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly content?: string; - /** - * The raw concatenated strings for the keyword fields that were used for semantic enrichment. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly keywords?: string; -} - -export interface VectorsDebugInfo { - /** - * The breakdown of subscores of the document prior to the chosen result set fusion/combination method such as RRF. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly subscores?: QueryResultDocumentSubscores; -} - -/** The breakdown of subscores between the text and vector query components of the search query for this document. Each vector query is shown as a separate object in the same order they were received. */ -export interface QueryResultDocumentSubscores { - /** - * The BM25 or Classic score for the text portion of the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly text?: TextResult; - /** - * The vector similarity and @search.score values for each vector query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly vectors?: { [propertyName: string]: SingleVectorFieldResult }[]; - /** - * The BM25 or Classic score for the text portion of the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly documentBoost?: number; -} - -/** The BM25 or Classic score for the text portion of the query. */ -export interface TextResult { - /** - * The BM25 or Classic score for the text portion of the query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly searchScore?: number; -} - -/** A single vector field result. Both @search.score and vector similarity values are returned. Vector similarity is related to @search.score by an equation. */ -export interface SingleVectorFieldResult { - /** - * The @search.score value that is calculated from the vector similarity score. 
This is the score that's visible in a pure single-field single-vector query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly searchScore?: number; - /** - * The vector similarity score for this document. Note this is the canonical definition of similarity metric, not the 'distance' version. For example, cosine similarity instead of cosine distance. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly vectorSimilarity?: number; -} - -/** Contains debugging information that can be used to further explore your search results. */ -export interface DebugInfo { - /** - * Contains debugging information specific to query rewrites. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly queryRewrites?: QueryRewritesDebugInfo; -} - -/** Contains debugging information specific to query rewrites. */ -export interface QueryRewritesDebugInfo { - /** - * List of query rewrites generated for the text query. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly text?: QueryRewritesValuesDebugInfo; - /** - * List of query rewrites generated for the vectorizable text queries. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly vectors?: QueryRewritesValuesDebugInfo[]; -} - -/** Contains debugging information specific to query rewrites. */ -export interface QueryRewritesValuesDebugInfo { - /** - * The input text to the generative query rewriting model. There may be cases where the user query and the input to the generative model are not identical. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly inputQuery?: string; - /** - * List of query rewrites. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly rewrites?: string[]; -} - /** Response containing suggestion query results from an index. */ export interface SuggestDocumentsResult { /** @@ -642,40 +429,6 @@ export interface VectorizableTextQuery extends VectorQuery { kind: "text"; /** The text to be vectorized to perform a vector search query. */ text: string; - /** Can be configured to let a generative model rewrite the query before sending it to be vectorized. */ - queryRewrites?: QueryRewritesType; -} - -/** The query parameters to use for vector search when an url that represents an image value that needs to be vectorized is provided. */ -export interface VectorizableImageUrlQuery extends VectorQuery { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "imageUrl"; - /** The URL of an image to be vectorized to perform a vector search query. */ - url?: string; -} - -/** The query parameters to use for vector search when a base 64 encoded binary of an image that needs to be vectorized is provided. */ -export interface VectorizableImageBinaryQuery extends VectorQuery { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "imageBinary"; - /** The base 64 encoded binary of an image to be vectorized to perform a vector search query. */ - base64Image?: string; -} - -/** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. 
The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */ -export interface VectorSimilarityThreshold extends VectorThreshold { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "vectorSimilarity"; - /** The threshold will filter based on the similarity metric value. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */ - value: number; -} - -/** The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */ -export interface SearchScoreThreshold extends VectorThreshold { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "searchScore"; - /** The threshold will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */ - value: number; } /** Parameter group */ @@ -722,22 +475,12 @@ export interface SearchOptions { semanticErrorHandling?: SemanticErrorMode; /** Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails. */ semanticMaxWaitInMilliseconds?: number; - /** This parameter is only valid if the query type is `semantic`. If set, the query returns answers extracted from key passages in the highest ranked documents. The number of answers returned can be configured by appending the pipe character `|` followed by the `count-` option after the answers parameter value, such as `extractive|count-3`. Default count is 1. The confidence threshold can be configured by appending the pipe character `|` followed by the `threshold-` option after the answers parameter value, such as `extractive|threshold-0.9`. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe character '|' followed by the 'count-', such as 'extractive|maxcharlength-600'. */ + /** This parameter is only valid if the query type is `semantic`. If set, the query returns answers extracted from key passages in the highest ranked documents. The number of answers returned can be configured by appending the pipe character `|` followed by the `count-` option after the answers parameter value, such as `extractive|count-3`. Default count is 1. The confidence threshold can be configured by appending the pipe character `|` followed by the `threshold-` option after the answers parameter value, such as `extractive|threshold-0.9`. Default threshold is 0.7. */ answers?: QueryAnswerType; - /** This parameter is only valid if the query type is `semantic`. If set, the query returns captions extracted from key passages in the highest ranked documents. When Captions is set to `extractive`, highlighting is enabled by default, and can be configured by appending the pipe character `|` followed by the `highlight-` option, such as `extractive|highlight-true`. Defaults to `None`. The maximum character length of captions can be configured by appending the pipe character '|' followed by the 'count-', such as 'extractive|maxcharlength-600'. */ + /** This parameter is only valid if the query type is `semantic`. 
If set, the query returns captions extracted from key passages in the highest ranked documents. When Captions is set to `extractive`, highlighting is enabled by default, and can be configured by appending the pipe character `|` followed by the `highlight-` option, such as `extractive|highlight-true`. Defaults to `None`. */ captions?: QueryCaptionType; /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */ semanticQuery?: string; - /** When QueryRewrites is set to `generative`, the query terms are sent to a generate model which will produce 10 (default) rewrites to help increase the recall of the request. The requested count can be configured by appending the pipe character `|` followed by the `count-` option, such as `generative|count-3`. Defaults to `None`. This parameter is only valid if the query type is `semantic`. */ - queryRewrites?: QueryRewritesType; - /** Enables a debugging tool that can be used to further explore your search results. */ - debug?: QueryDebugMode; - /** The language of the query. */ - queryLanguage?: QueryLanguage; - /** Improve search recall by spell-correcting individual search query terms. */ - speller?: QuerySpellerType; - /** The list of field names used for semantic ranking. */ - semanticFields?: string[]; } /** Parameter group */ @@ -782,20 +525,20 @@ export interface AutocompleteOptions { top?: number; } -/** Known values of {@link ApiVersion20241101Preview} that the service accepts. */ -export enum KnownApiVersion20241101Preview { - /** Api Version '2024-11-01-preview' */ - TwoThousandTwentyFour1101Preview = "2024-11-01-preview", +/** Known values of {@link ApiVersion20240701} that the service accepts. */ +export enum KnownApiVersion20240701 { + /** Api Version '2024-07-01' */ + TwoThousandTwentyFour0701 = "2024-07-01", } /** - * Defines values for ApiVersion20241101Preview. \ - * {@link KnownApiVersion20241101Preview} can be used interchangeably with ApiVersion20241101Preview, + * Defines values for ApiVersion20240701. \ + * {@link KnownApiVersion20240701} can be used interchangeably with ApiVersion20240701, * this enum contains the known values that the service supports. * ### Known values supported by the service - * **2024-11-01-preview**: Api Version '2024-11-01-preview' + * **2024-07-01**: Api Version '2024-07-01' */ -export type ApiVersion20241101Preview = string; +export type ApiVersion20240701 = string; /** Known values of {@link SemanticErrorMode} that the service accepts. */ export enum KnownSemanticErrorMode { @@ -851,307 +594,12 @@ export enum KnownQueryCaptionType { */ export type QueryCaptionType = string; -/** Known values of {@link QueryRewritesType} that the service accepts. */ -export enum KnownQueryRewritesType { - /** Do not generate additional query rewrites for this query. */ - None = "none", - /** Generate alternative query terms to increase the recall of a search request. */ - Generative = "generative", -} - -/** - * Defines values for QueryRewritesType. \ - * {@link KnownQueryRewritesType} can be used interchangeably with QueryRewritesType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **none**: Do not generate additional query rewrites for this query. 
\ - * **generative**: Generate alternative query terms to increase the recall of a search request. - */ -export type QueryRewritesType = string; - -/** Known values of {@link QueryDebugMode} that the service accepts. */ -export enum KnownQueryDebugMode { - /** No query debugging information will be returned. */ - Disabled = "disabled", - /** Allows the user to further explore their reranked results. */ - Semantic = "semantic", - /** Allows the user to further explore their hybrid and vector query results. */ - Vector = "vector", - /** Allows the user to explore the list of query rewrites generated for their search request. */ - QueryRewrites = "queryRewrites", - /** Turn on all debug options. */ - All = "all", -} - -/** - * Defines values for QueryDebugMode. \ - * {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **disabled**: No query debugging information will be returned. \ - * **semantic**: Allows the user to further explore their reranked results. \ - * **vector**: Allows the user to further explore their hybrid and vector query results. \ - * **queryRewrites**: Allows the user to explore the list of query rewrites generated for their search request. \ - * **all**: Turn on all debug options. - */ -export type QueryDebugMode = string; - -/** Known values of {@link QueryLanguage} that the service accepts. */ -export enum KnownQueryLanguage { - /** Query language not specified. */ - None = "none", - /** Query language value for English (United States). */ - EnUs = "en-us", - /** Query language value for English (Great Britain). */ - EnGb = "en-gb", - /** Query language value for English (India). */ - EnIn = "en-in", - /** Query language value for English (Canada). */ - EnCa = "en-ca", - /** Query language value for English (Australia). */ - EnAu = "en-au", - /** Query language value for French (France). */ - FrFr = "fr-fr", - /** Query language value for French (Canada). */ - FrCa = "fr-ca", - /** Query language value for German (Germany). */ - DeDe = "de-de", - /** Query language value for Spanish (Spain). */ - EsEs = "es-es", - /** Query language value for Spanish (Mexico). */ - EsMx = "es-mx", - /** Query language value for Chinese (China). */ - ZhCn = "zh-cn", - /** Query language value for Chinese (Taiwan). */ - ZhTw = "zh-tw", - /** Query language value for Portuguese (Brazil). */ - PtBr = "pt-br", - /** Query language value for Portuguese (Portugal). */ - PtPt = "pt-pt", - /** Query language value for Italian (Italy). */ - ItIt = "it-it", - /** Query language value for Japanese (Japan). */ - JaJp = "ja-jp", - /** Query language value for Korean (Korea). */ - KoKr = "ko-kr", - /** Query language value for Russian (Russia). */ - RuRu = "ru-ru", - /** Query language value for Czech (Czech Republic). */ - CsCz = "cs-cz", - /** Query language value for Dutch (Belgium). */ - NlBe = "nl-be", - /** Query language value for Dutch (Netherlands). */ - NlNl = "nl-nl", - /** Query language value for Hungarian (Hungary). */ - HuHu = "hu-hu", - /** Query language value for Polish (Poland). */ - PlPl = "pl-pl", - /** Query language value for Swedish (Sweden). */ - SvSe = "sv-se", - /** Query language value for Turkish (Turkey). */ - TrTr = "tr-tr", - /** Query language value for Hindi (India). */ - HiIn = "hi-in", - /** Query language value for Arabic (Saudi Arabia). */ - ArSa = "ar-sa", - /** Query language value for Arabic (Egypt). 
*/ - ArEg = "ar-eg", - /** Query language value for Arabic (Morocco). */ - ArMa = "ar-ma", - /** Query language value for Arabic (Kuwait). */ - ArKw = "ar-kw", - /** Query language value for Arabic (Jordan). */ - ArJo = "ar-jo", - /** Query language value for Danish (Denmark). */ - DaDk = "da-dk", - /** Query language value for Norwegian (Norway). */ - NoNo = "no-no", - /** Query language value for Bulgarian (Bulgaria). */ - BgBg = "bg-bg", - /** Query language value for Croatian (Croatia). */ - HrHr = "hr-hr", - /** Query language value for Croatian (Bosnia and Herzegovina). */ - HrBa = "hr-ba", - /** Query language value for Malay (Malaysia). */ - MsMy = "ms-my", - /** Query language value for Malay (Brunei Darussalam). */ - MsBn = "ms-bn", - /** Query language value for Slovenian (Slovenia). */ - SlSl = "sl-sl", - /** Query language value for Tamil (India). */ - TaIn = "ta-in", - /** Query language value for Vietnamese (Viet Nam). */ - ViVn = "vi-vn", - /** Query language value for Greek (Greece). */ - ElGr = "el-gr", - /** Query language value for Romanian (Romania). */ - RoRo = "ro-ro", - /** Query language value for Icelandic (Iceland). */ - IsIs = "is-is", - /** Query language value for Indonesian (Indonesia). */ - IdId = "id-id", - /** Query language value for Thai (Thailand). */ - ThTh = "th-th", - /** Query language value for Lithuanian (Lithuania). */ - LtLt = "lt-lt", - /** Query language value for Ukrainian (Ukraine). */ - UkUa = "uk-ua", - /** Query language value for Latvian (Latvia). */ - LvLv = "lv-lv", - /** Query language value for Estonian (Estonia). */ - EtEe = "et-ee", - /** Query language value for Catalan. */ - CaEs = "ca-es", - /** Query language value for Finnish (Finland). */ - FiFi = "fi-fi", - /** Query language value for Serbian (Bosnia and Herzegovina). */ - SrBa = "sr-ba", - /** Query language value for Serbian (Montenegro). */ - SrMe = "sr-me", - /** Query language value for Serbian (Serbia). */ - SrRs = "sr-rs", - /** Query language value for Slovak (Slovakia). */ - SkSk = "sk-sk", - /** Query language value for Norwegian (Norway). */ - NbNo = "nb-no", - /** Query language value for Armenian (Armenia). */ - HyAm = "hy-am", - /** Query language value for Bengali (India). */ - BnIn = "bn-in", - /** Query language value for Basque. */ - EuEs = "eu-es", - /** Query language value for Galician. */ - GlEs = "gl-es", - /** Query language value for Gujarati (India). */ - GuIn = "gu-in", - /** Query language value for Hebrew (Israel). */ - HeIl = "he-il", - /** Query language value for Irish (Ireland). */ - GaIe = "ga-ie", - /** Query language value for Kannada (India). */ - KnIn = "kn-in", - /** Query language value for Malayalam (India). */ - MlIn = "ml-in", - /** Query language value for Marathi (India). */ - MrIn = "mr-in", - /** Query language value for Persian (U.A.E.). */ - FaAe = "fa-ae", - /** Query language value for Punjabi (India). */ - PaIn = "pa-in", - /** Query language value for Telugu (India). */ - TeIn = "te-in", - /** Query language value for Urdu (Pakistan). */ - UrPk = "ur-pk", -} - -/** - * Defines values for QueryLanguage. \ - * {@link KnownQueryLanguage} can be used interchangeably with QueryLanguage, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **none**: Query language not specified. \ - * **en-us**: Query language value for English (United States). \ - * **en-gb**: Query language value for English (Great Britain). 
\ - * **en-in**: Query language value for English (India). \ - * **en-ca**: Query language value for English (Canada). \ - * **en-au**: Query language value for English (Australia). \ - * **fr-fr**: Query language value for French (France). \ - * **fr-ca**: Query language value for French (Canada). \ - * **de-de**: Query language value for German (Germany). \ - * **es-es**: Query language value for Spanish (Spain). \ - * **es-mx**: Query language value for Spanish (Mexico). \ - * **zh-cn**: Query language value for Chinese (China). \ - * **zh-tw**: Query language value for Chinese (Taiwan). \ - * **pt-br**: Query language value for Portuguese (Brazil). \ - * **pt-pt**: Query language value for Portuguese (Portugal). \ - * **it-it**: Query language value for Italian (Italy). \ - * **ja-jp**: Query language value for Japanese (Japan). \ - * **ko-kr**: Query language value for Korean (Korea). \ - * **ru-ru**: Query language value for Russian (Russia). \ - * **cs-cz**: Query language value for Czech (Czech Republic). \ - * **nl-be**: Query language value for Dutch (Belgium). \ - * **nl-nl**: Query language value for Dutch (Netherlands). \ - * **hu-hu**: Query language value for Hungarian (Hungary). \ - * **pl-pl**: Query language value for Polish (Poland). \ - * **sv-se**: Query language value for Swedish (Sweden). \ - * **tr-tr**: Query language value for Turkish (Turkey). \ - * **hi-in**: Query language value for Hindi (India). \ - * **ar-sa**: Query language value for Arabic (Saudi Arabia). \ - * **ar-eg**: Query language value for Arabic (Egypt). \ - * **ar-ma**: Query language value for Arabic (Morocco). \ - * **ar-kw**: Query language value for Arabic (Kuwait). \ - * **ar-jo**: Query language value for Arabic (Jordan). \ - * **da-dk**: Query language value for Danish (Denmark). \ - * **no-no**: Query language value for Norwegian (Norway). \ - * **bg-bg**: Query language value for Bulgarian (Bulgaria). \ - * **hr-hr**: Query language value for Croatian (Croatia). \ - * **hr-ba**: Query language value for Croatian (Bosnia and Herzegovina). \ - * **ms-my**: Query language value for Malay (Malaysia). \ - * **ms-bn**: Query language value for Malay (Brunei Darussalam). \ - * **sl-sl**: Query language value for Slovenian (Slovenia). \ - * **ta-in**: Query language value for Tamil (India). \ - * **vi-vn**: Query language value for Vietnamese (Viet Nam). \ - * **el-gr**: Query language value for Greek (Greece). \ - * **ro-ro**: Query language value for Romanian (Romania). \ - * **is-is**: Query language value for Icelandic (Iceland). \ - * **id-id**: Query language value for Indonesian (Indonesia). \ - * **th-th**: Query language value for Thai (Thailand). \ - * **lt-lt**: Query language value for Lithuanian (Lithuania). \ - * **uk-ua**: Query language value for Ukrainian (Ukraine). \ - * **lv-lv**: Query language value for Latvian (Latvia). \ - * **et-ee**: Query language value for Estonian (Estonia). \ - * **ca-es**: Query language value for Catalan. \ - * **fi-fi**: Query language value for Finnish (Finland). \ - * **sr-ba**: Query language value for Serbian (Bosnia and Herzegovina). \ - * **sr-me**: Query language value for Serbian (Montenegro). \ - * **sr-rs**: Query language value for Serbian (Serbia). \ - * **sk-sk**: Query language value for Slovak (Slovakia). \ - * **nb-no**: Query language value for Norwegian (Norway). \ - * **hy-am**: Query language value for Armenian (Armenia). \ - * **bn-in**: Query language value for Bengali (India). 
\ - * **eu-es**: Query language value for Basque. \ - * **gl-es**: Query language value for Galician. \ - * **gu-in**: Query language value for Gujarati (India). \ - * **he-il**: Query language value for Hebrew (Israel). \ - * **ga-ie**: Query language value for Irish (Ireland). \ - * **kn-in**: Query language value for Kannada (India). \ - * **ml-in**: Query language value for Malayalam (India). \ - * **mr-in**: Query language value for Marathi (India). \ - * **fa-ae**: Query language value for Persian (U.A.E.). \ - * **pa-in**: Query language value for Punjabi (India). \ - * **te-in**: Query language value for Telugu (India). \ - * **ur-pk**: Query language value for Urdu (Pakistan). - */ -export type QueryLanguage = string; - -/** Known values of {@link QuerySpellerType} that the service accepts. */ -export enum KnownQuerySpellerType { - /** Speller not enabled. */ - None = "none", - /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */ - Lexicon = "lexicon", -} - -/** - * Defines values for QuerySpellerType. \ - * {@link KnownQuerySpellerType} can be used interchangeably with QuerySpellerType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **none**: Speller not enabled. \ - * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. - */ -export type QuerySpellerType = string; - /** Known values of {@link VectorQueryKind} that the service accepts. */ export enum KnownVectorQueryKind { /** Vector query where a raw vector value is provided. */ Vector = "vector", /** Vector query where a text value that needs to be vectorized is provided. */ Text = "text", - /** Vector query where an url that represents an image value that needs to be vectorized is provided. */ - ImageUrl = "imageUrl", - /** Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided. */ - ImageBinary = "imageBinary", } /** @@ -1160,30 +608,10 @@ export enum KnownVectorQueryKind { * this enum contains the known values that the service supports. * ### Known values supported by the service * **vector**: Vector query where a raw vector value is provided. \ - * **text**: Vector query where a text value that needs to be vectorized is provided. \ - * **imageUrl**: Vector query where an url that represents an image value that needs to be vectorized is provided. \ - * **imageBinary**: Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided. + * **text**: Vector query where a text value that needs to be vectorized is provided. */ export type VectorQueryKind = string; -/** Known values of {@link VectorThresholdKind} that the service accepts. */ -export enum KnownVectorThresholdKind { - /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */ - VectorSimilarity = "vectorSimilarity", - /** The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */ - SearchScore = "searchScore", -} - -/** - * Defines values for VectorThresholdKind. 
\ - * {@link KnownVectorThresholdKind} can be used interchangeably with VectorThresholdKind, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **vectorSimilarity**: The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. \ - * **searchScore**: The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. - */ -export type VectorThresholdKind = string; - /** Known values of {@link VectorFilterMode} that the service accepts. */ export enum KnownVectorFilterMode { /** The filter will be applied after the candidate set of vector results is returned. Depending on the filter selectivity, this can result in fewer results than requested by the parameter 'k'. */ @@ -1202,45 +630,6 @@ export enum KnownVectorFilterMode { */ export type VectorFilterMode = string; -/** Known values of {@link HybridCountAndFacetMode} that the service accepts. */ -export enum KnownHybridCountAndFacetMode { - /** Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. */ - CountRetrievableResults = "countRetrievableResults", - /** Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window. */ - CountAllResults = "countAllResults", -} - -/** - * Defines values for HybridCountAndFacetMode. \ - * {@link KnownHybridCountAndFacetMode} can be used interchangeably with HybridCountAndFacetMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **countRetrievableResults**: Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. \ - * **countAllResults**: Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window. - */ -export type HybridCountAndFacetMode = string; - -/** Known values of {@link SemanticFieldState} that the service accepts. */ -export enum KnownSemanticFieldState { - /** The field was fully used for semantic enrichment. */ - Used = "used", - /** The field was not used for semantic enrichment. */ - Unused = "unused", - /** The field was partially used for semantic enrichment. */ - Partial = "partial", -} - -/** - * Defines values for SemanticFieldState. \ - * {@link KnownSemanticFieldState} can be used interchangeably with SemanticFieldState, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **used**: The field was fully used for semantic enrichment. \ - * **unused**: The field was not used for semantic enrichment. \ - * **partial**: The field was partially used for semantic enrichment. - */ -export type SemanticFieldState = string; - /** Known values of {@link SemanticErrorReason} that the service accepts. 
*/ export enum KnownSemanticErrorReason { /** If `semanticMaxWaitInMilliseconds` was set and the semantic processing duration exceeded that value. Only the base results were returned. */ @@ -1279,21 +668,6 @@ export enum KnownSemanticSearchResultsType { * **rerankedResults**: Results have been reranked with the reranker model and will include semantic captions. They will not include any answers, answers highlights or caption highlights. */ export type SemanticSearchResultsType = string; - -/** Known values of {@link SemanticQueryRewritesResultType} that the service accepts. */ -export enum KnownSemanticQueryRewritesResultType { - /** Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results. */ - OriginalQueryOnly = "originalQueryOnly", -} - -/** - * Defines values for SemanticQueryRewritesResultType. \ - * {@link KnownSemanticQueryRewritesResultType} can be used interchangeably with SemanticQueryRewritesResultType, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **originalQueryOnly**: Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results. - */ -export type SemanticQueryRewritesResultType = string; /** Defines values for QueryType. */ export type QueryType = "simple" | "full" | "semantic"; /** Defines values for SearchMode. */ diff --git a/sdk/search/search-documents/src/generated/data/models/mappers.ts b/sdk/search/search-documents/src/generated/data/models/mappers.ts index 1223832bc3ad..acaddb9abfba 100644 --- a/sdk/search/search-documents/src/generated/data/models/mappers.ts +++ b/sdk/search/search-documents/src/generated/data/models/mappers.ts @@ -194,20 +194,6 @@ export const SearchDocumentsResult: coreClient.CompositeMapper = { name: "String", }, }, - semanticQueryRewritesResultType: { - serializedName: "@search\\.semanticQueryRewritesResultType", - readOnly: true, - type: { - name: "String", - }, - }, - debugInfo: { - serializedName: "@search\\.debug", - type: { - name: "Composite", - className: "DebugInfo", - }, - }, }, }, }; @@ -225,21 +211,6 @@ export const FacetResult: coreClient.CompositeMapper = { name: "Number", }, }, - facets: { - serializedName: "@search\\.facets", - readOnly: true, - type: { - name: "Dictionary", - value: { - type: { - name: "Sequence", - element: { - type: { name: "Composite", className: "FacetResult" }, - }, - }, - }, - }, - }, }, }, }; @@ -381,12 +352,6 @@ export const SearchRequest: coreClient.CompositeMapper = { name: "String", }, }, - debug: { - serializedName: "debug", - type: { - name: "String", - }, - }, searchText: { serializedName: "search", type: { @@ -406,18 +371,6 @@ export const SearchRequest: coreClient.CompositeMapper = { allowedValues: ["any", "all"], }, }, - queryLanguage: { - serializedName: "queryLanguage", - type: { - name: "String", - }, - }, - speller: { - serializedName: "speller", - type: { - name: "String", - }, - }, select: { serializedName: "select", type: { @@ -476,18 +429,6 @@ export const SearchRequest: coreClient.CompositeMapper = { name: "String", }, }, - queryRewrites: { - serializedName: "queryRewrites", - type: { - name: "String", - }, - }, - semanticFields: { - serializedName: "semanticFields", - type: { - name: "String", - }, - }, vectorQueries: { serializedName: "vectorQueries", type: { @@ -506,13 +447,6 @@ export const SearchRequest: coreClient.CompositeMapper = { name: "String", }, }, - hybridSearch: { - 
serializedName: "hybridSearch", - type: { - name: "Composite", - className: "HybridSearch", - }, - }, }, }, }; @@ -564,61 +498,6 @@ export const VectorQuery: coreClient.CompositeMapper = { name: "Number", }, }, - threshold: { - serializedName: "threshold", - type: { - name: "Composite", - className: "VectorThreshold", - }, - }, - filterOverride: { - serializedName: "filterOverride", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorThreshold: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "VectorThreshold", - uberParent: "VectorThreshold", - polymorphicDiscriminator: { - serializedName: "kind", - clientName: "kind", - }, - modelProperties: { - kind: { - serializedName: "kind", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const HybridSearch: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "HybridSearch", - modelProperties: { - maxTextRecallSize: { - serializedName: "maxTextRecallSize", - type: { - name: "Number", - }, - }, - countAndFacetMode: { - serializedName: "countAndFacetMode", - type: { - name: "String", - }, - }, }, }, }; @@ -669,13 +548,6 @@ export const SearchResult: coreClient.CompositeMapper = { }, }, }, - documentDebugInfo: { - serializedName: "@search\\.documentDebugInfo", - type: { - name: "Composite", - className: "DocumentDebugInfo", - }, - }, }, }, }; @@ -705,300 +577,6 @@ export const QueryCaptionResult: coreClient.CompositeMapper = { }, }; -export const DocumentDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DocumentDebugInfo", - modelProperties: { - semantic: { - serializedName: "semantic", - type: { - name: "Composite", - className: "SemanticDebugInfo", - }, - }, - vectors: { - serializedName: "vectors", - type: { - name: "Composite", - className: "VectorsDebugInfo", - }, - }, - }, - }, -}; - -export const SemanticDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SemanticDebugInfo", - modelProperties: { - titleField: { - serializedName: "titleField", - type: { - name: "Composite", - className: "QueryResultDocumentSemanticField", - }, - }, - contentFields: { - serializedName: "contentFields", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "QueryResultDocumentSemanticField", - }, - }, - }, - }, - keywordFields: { - serializedName: "keywordFields", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "QueryResultDocumentSemanticField", - }, - }, - }, - }, - rerankerInput: { - serializedName: "rerankerInput", - type: { - name: "Composite", - className: "QueryResultDocumentRerankerInput", - }, - }, - }, - }, -}; - -export const QueryResultDocumentSemanticField: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryResultDocumentSemanticField", - modelProperties: { - name: { - serializedName: "name", - readOnly: true, - type: { - name: "String", - }, - }, - state: { - serializedName: "state", - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const QueryResultDocumentRerankerInput: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryResultDocumentRerankerInput", - modelProperties: { - title: { - serializedName: "title", - readOnly: true, - type: { - name: "String", - }, - }, - content: { - serializedName: "content", - readOnly: true, - type: { - name: "String", - }, - }, - keywords: { - 
serializedName: "keywords", - readOnly: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorsDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "VectorsDebugInfo", - modelProperties: { - subscores: { - serializedName: "subscores", - type: { - name: "Composite", - className: "QueryResultDocumentSubscores", - }, - }, - }, - }, -}; - -export const QueryResultDocumentSubscores: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryResultDocumentSubscores", - modelProperties: { - text: { - serializedName: "text", - type: { - name: "Composite", - className: "TextResult", - }, - }, - vectors: { - serializedName: "vectors", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Dictionary", - value: { - type: { - name: "Composite", - className: "SingleVectorFieldResult", - }, - }, - }, - }, - }, - }, - documentBoost: { - serializedName: "documentBoost", - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const TextResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "TextResult", - modelProperties: { - searchScore: { - serializedName: "searchScore", - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const SingleVectorFieldResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SingleVectorFieldResult", - modelProperties: { - searchScore: { - serializedName: "searchScore", - readOnly: true, - type: { - name: "Number", - }, - }, - vectorSimilarity: { - serializedName: "vectorSimilarity", - readOnly: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const DebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DebugInfo", - modelProperties: { - queryRewrites: { - serializedName: "queryRewrites", - type: { - name: "Composite", - className: "QueryRewritesDebugInfo", - }, - }, - }, - }, -}; - -export const QueryRewritesDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryRewritesDebugInfo", - modelProperties: { - text: { - serializedName: "text", - type: { - name: "Composite", - className: "QueryRewritesValuesDebugInfo", - }, - }, - vectors: { - serializedName: "vectors", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "QueryRewritesValuesDebugInfo", - }, - }, - }, - }, - }, - }, -}; - -export const QueryRewritesValuesDebugInfo: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "QueryRewritesValuesDebugInfo", - modelProperties: { - inputQuery: { - serializedName: "inputQuery", - readOnly: true, - type: { - name: "String", - }, - }, - rewrites: { - serializedName: "rewrites", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - export const SuggestDocumentsResult: coreClient.CompositeMapper = { type: { name: "Composite", @@ -1395,101 +973,12 @@ export const VectorizableTextQuery: coreClient.CompositeMapper = { name: "String", }, }, - queryRewrites: { - serializedName: "queryRewrites", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorizableImageUrlQuery: coreClient.CompositeMapper = { - serializedName: "imageUrl", - type: { - name: "Composite", - className: "VectorizableImageUrlQuery", - uberParent: "VectorQuery", - polymorphicDiscriminator: VectorQuery.type.polymorphicDiscriminator, - modelProperties: { - 
...VectorQuery.type.modelProperties, - url: { - serializedName: "url", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorizableImageBinaryQuery: coreClient.CompositeMapper = { - serializedName: "imageBinary", - type: { - name: "Composite", - className: "VectorizableImageBinaryQuery", - uberParent: "VectorQuery", - polymorphicDiscriminator: VectorQuery.type.polymorphicDiscriminator, - modelProperties: { - ...VectorQuery.type.modelProperties, - base64Image: { - serializedName: "base64Image", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const VectorSimilarityThreshold: coreClient.CompositeMapper = { - serializedName: "vectorSimilarity", - type: { - name: "Composite", - className: "VectorSimilarityThreshold", - uberParent: "VectorThreshold", - polymorphicDiscriminator: VectorThreshold.type.polymorphicDiscriminator, - modelProperties: { - ...VectorThreshold.type.modelProperties, - value: { - serializedName: "value", - required: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const SearchScoreThreshold: coreClient.CompositeMapper = { - serializedName: "searchScore", - type: { - name: "Composite", - className: "SearchScoreThreshold", - uberParent: "VectorThreshold", - polymorphicDiscriminator: VectorThreshold.type.polymorphicDiscriminator, - modelProperties: { - ...VectorThreshold.type.modelProperties, - value: { - serializedName: "value", - required: true, - type: { - name: "Number", - }, - }, }, }, }; export let discriminators = { VectorQuery: VectorQuery, - VectorThreshold: VectorThreshold, "VectorQuery.vector": VectorizedQuery, "VectorQuery.text": VectorizableTextQuery, - "VectorQuery.imageUrl": VectorizableImageUrlQuery, - "VectorQuery.imageBinary": VectorizableImageBinaryQuery, - "VectorThreshold.vectorSimilarity": VectorSimilarityThreshold, - "VectorThreshold.searchScore": SearchScoreThreshold, }; diff --git a/sdk/search/search-documents/src/generated/data/models/parameters.ts b/sdk/search/search-documents/src/generated/data/models/parameters.ts index f8b2627ed34a..a4822b2e3931 100644 --- a/sdk/search/search-documents/src/generated/data/models/parameters.ts +++ b/sdk/search/search-documents/src/generated/data/models/parameters.ts @@ -16,7 +16,7 @@ import { SuggestRequest as SuggestRequestMapper, IndexBatch as IndexBatchMapper, AutocompleteRequest as AutocompleteRequestMapper, -} from "../models/mappers.js"; +} from "../models/mappers"; export const accept: OperationParameter = { parameterPath: "accept", @@ -356,62 +356,6 @@ export const semanticQuery: OperationQueryParameter = { }, }; -export const queryRewrites: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "queryRewrites"], - mapper: { - serializedName: "queryRewrites", - type: { - name: "String", - }, - }, -}; - -export const debug: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "debug"], - mapper: { - serializedName: "debug", - type: { - name: "String", - }, - }, -}; - -export const queryLanguage: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "queryLanguage"], - mapper: { - serializedName: "queryLanguage", - type: { - name: "String", - }, - }, -}; - -export const speller: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "speller"], - mapper: { - serializedName: "speller", - type: { - name: "String", - }, - }, -}; - -export const semanticFields: OperationQueryParameter = { - parameterPath: ["options", "searchOptions", "semanticFields"], - mapper: { - serializedName: 
"semanticFields", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - collectionFormat: "CSV", -}; - export const contentType: OperationParameter = { parameterPath: ["options", "contentType"], mapper: { diff --git a/sdk/search/search-documents/src/generated/data/operations/documents.ts b/sdk/search/search-documents/src/generated/data/operations/documents.ts index 567b29f484f7..301b5da08027 100644 --- a/sdk/search/search-documents/src/generated/data/operations/documents.ts +++ b/sdk/search/search-documents/src/generated/data/operations/documents.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -import { Documents } from "../operationsInterfaces/index.js"; +import { Documents } from "../operationsInterfaces"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchClient } from "../searchClient.js"; +import * as Mappers from "../models/mappers"; +import * as Parameters from "../models/parameters"; +import { SearchClient } from "../searchClient"; import { DocumentsCountOptionalParams, DocumentsCountResponse, @@ -34,7 +34,7 @@ import { AutocompleteRequest, DocumentsAutocompletePostOptionalParams, DocumentsAutocompletePostResponse, -} from "../models/index.js"; +} from "../models"; /** Class containing Documents operations. */ export class DocumentsImpl implements Documents { @@ -237,11 +237,6 @@ const searchGetOperationSpec: coreClient.OperationSpec = { Parameters.answers, Parameters.captions, Parameters.semanticQuery, - Parameters.queryRewrites, - Parameters.debug, - Parameters.queryLanguage, - Parameters.speller, - Parameters.semanticFields, ], urlParameters: [Parameters.endpoint, Parameters.indexName], headerParameters: [Parameters.accept], diff --git a/sdk/search/search-documents/src/generated/data/operations/index.ts b/sdk/search/search-documents/src/generated/data/operations/index.ts index e6fde9effe60..77c96e3f8b79 100644 --- a/sdk/search/search-documents/src/generated/data/operations/index.ts +++ b/sdk/search/search-documents/src/generated/data/operations/index.ts @@ -6,4 +6,4 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -export * from "./documents.js"; +export * from "./documents"; diff --git a/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts b/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts index cf365fcb51c8..2cedcc7c4163 100644 --- a/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts +++ b/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts @@ -29,7 +29,7 @@ import { AutocompleteRequest, DocumentsAutocompletePostOptionalParams, DocumentsAutocompletePostResponse, -} from "../models/index.js"; +} from "../models"; /** Interface representing a Documents. */ export interface Documents { diff --git a/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts b/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts index e6fde9effe60..77c96e3f8b79 100644 --- a/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts +++ b/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts @@ -6,4 +6,4 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ -export * from "./documents.js"; +export * from "./documents"; diff --git a/sdk/search/search-documents/src/generated/data/searchClient.ts b/sdk/search/search-documents/src/generated/data/searchClient.ts index 913be1b41833..7773e7490bac 100644 --- a/sdk/search/search-documents/src/generated/data/searchClient.ts +++ b/sdk/search/search-documents/src/generated/data/searchClient.ts @@ -12,18 +12,15 @@ import { PipelineResponse, SendRequest, } from "@azure/core-rest-pipeline"; -import { DocumentsImpl } from "./operations/index.js"; -import { Documents } from "./operationsInterfaces/index.js"; -import { - ApiVersion20241101Preview, - SearchClientOptionalParams, -} from "./models/index.js"; +import { DocumentsImpl } from "./operations"; +import { Documents } from "./operationsInterfaces"; +import { ApiVersion20240701, SearchClientOptionalParams } from "./models"; /** @internal */ export class SearchClient extends coreHttpCompat.ExtendedServiceClient { endpoint: string; indexName: string; - apiVersion: ApiVersion20241101Preview; + apiVersion: ApiVersion20240701; /** * Initializes a new instance of the SearchClient class. @@ -35,7 +32,7 @@ export class SearchClient extends coreHttpCompat.ExtendedServiceClient { constructor( endpoint: string, indexName: string, - apiVersion: ApiVersion20241101Preview, + apiVersion: ApiVersion20240701, options?: SearchClientOptionalParams, ) { if (endpoint === undefined) { @@ -56,7 +53,7 @@ export class SearchClient extends coreHttpCompat.ExtendedServiceClient { requestContentType: "application/json; charset=utf-8", }; - const packageDetails = `azsdk-js-search-documents/12.2.0-beta.2`; + const packageDetails = `azsdk-js-search-documents/12.1.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix ? `${options.userAgentOptions.userAgentPrefix} ${packageDetails}` diff --git a/sdk/search/search-documents/src/generated/service/index.ts b/sdk/search/search-documents/src/generated/service/index.ts index 275645f04104..707b92eedf04 100644 --- a/sdk/search/search-documents/src/generated/service/index.ts +++ b/sdk/search/search-documents/src/generated/service/index.ts @@ -6,6 +6,6 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
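// --- Illustrative sketch (not part of this patch): what pinning the generated
// layer to ApiVersion20240701 means for callers. `serviceVersion` is the
// public client option; the endpoint and key are placeholders.
import { AzureKeyCredential, SearchIndexClient } from "@azure/search-documents";

const pinnedClient = new SearchIndexClient(
  "https://<service>.search.windows.net",
  new AzureKeyCredential("<api-key>"),
  { serviceVersion: "2024-07-01" }, // the GA version restored by this change
);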
*/ -export * from "./models/index.js"; -export { SearchServiceClient } from "./searchServiceClient.js"; -export * from "./operationsInterfaces/index.js"; +export * from "./models"; +export { SearchServiceClient } from "./searchServiceClient"; +export * from "./operationsInterfaces"; diff --git a/sdk/search/search-documents/src/generated/service/models/index.ts b/sdk/search/search-documents/src/generated/service/models/index.ts index 1ee66b55ab7c..936104ccf8a7 100644 --- a/sdk/search/search-documents/src/generated/service/models/index.ts +++ b/sdk/search/search-documents/src/generated/service/models/index.ts @@ -9,18 +9,13 @@ import * as coreClient from "@azure/core-client"; import * as coreHttpCompat from "@azure/core-http-compat"; -export type SearchIndexerDataIdentityUnion = - | SearchIndexerDataIdentity - | SearchIndexerDataNoneIdentity - | SearchIndexerDataUserAssignedIdentity; export type DataChangeDetectionPolicyUnion = | DataChangeDetectionPolicy | HighWaterMarkChangeDetectionPolicy | SqlIntegratedChangeTrackingPolicy; export type DataDeletionDetectionPolicyUnion = | DataDeletionDetectionPolicy - | SoftDeleteColumnDeletionDetectionPolicy - | NativeBlobSoftDeleteDeletionDetectionPolicy; + | SoftDeleteColumnDeletionDetectionPolicy; export type SearchIndexerSkillUnion = | SearchIndexerSkill | ConditionalSkill @@ -40,17 +35,12 @@ export type SearchIndexerSkillUnion = | CustomEntityLookupSkill | TextTranslationSkill | DocumentExtractionSkill - | DocumentIntelligenceLayoutSkill | WebApiSkill - | AzureMachineLearningSkill - | AzureOpenAIEmbeddingSkill - | VisionVectorizeSkill; + | AzureOpenAIEmbeddingSkill; export type CognitiveServicesAccountUnion = | CognitiveServicesAccount | DefaultCognitiveServicesAccount - | CognitiveServicesAccountKey - | AIServicesAccountKey - | AIServicesAccountIdentity; + | CognitiveServicesAccountKey; export type ScoringFunctionUnion = | ScoringFunction | DistanceScoringFunction @@ -108,7 +98,6 @@ export type CharFilterUnion = | CharFilter | MappingCharFilter | PatternReplaceCharFilter; -export type LexicalNormalizerUnion = LexicalNormalizer | CustomNormalizer; export type SimilarityUnion = Similarity | ClassicSimilarity | BM25Similarity; export type VectorSearchAlgorithmConfigurationUnion = | VectorSearchAlgorithmConfiguration @@ -117,13 +106,15 @@ export type VectorSearchAlgorithmConfigurationUnion = export type VectorSearchVectorizerUnion = | VectorSearchVectorizer | AzureOpenAIVectorizer - | WebApiVectorizer - | AIServicesVisionVectorizer - | AMLVectorizer; + | WebApiVectorizer; export type VectorSearchCompressionUnion = | VectorSearchCompression | ScalarQuantizationCompression | BinaryQuantizationCompression; +export type SearchIndexerDataIdentityUnion = + | SearchIndexerDataIdentity + | SearchIndexerDataNoneIdentity + | SearchIndexerDataUserAssignedIdentity; /** Represents a datasource definition, which can be used to configure an indexer. */ export interface SearchIndexerDataSource { @@ -137,8 +128,6 @@ export interface SearchIndexerDataSource { credentials: DataSourceCredentials; /** The data container for the datasource. */ container: SearchIndexerDataContainer; - /** An explicit managed identity to use for this datasource. If not specified and the connection string is a managed identity, the system-assigned managed identity is used. If not specified, the value remains unchanged. If "none" is specified, the value of this property is cleared. */ - identity?: SearchIndexerDataIdentityUnion; /** The data change detection policy for the datasource. 
*/ dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion; /** The data deletion detection policy for the datasource. */ @@ -163,14 +152,6 @@ export interface SearchIndexerDataContainer { query?: string; } -/** Abstract base type for data identities. */ -export interface SearchIndexerDataIdentity { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Azure.Search.DataNoneIdentity" - | "#Microsoft.Azure.Search.DataUserAssignedIdentity"; -} - /** Base type for data change detection policies. */ export interface DataChangeDetectionPolicy { /** Polymorphic discriminator, which specifies the different types this object can be */ @@ -182,9 +163,7 @@ export interface DataChangeDetectionPolicy { /** Base type for data deletion detection policies. */ export interface DataDeletionDetectionPolicy { /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: - | "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" - | "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"; + odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"; } /** A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest, such as indexes and synonym maps. */ @@ -197,8 +176,6 @@ export interface SearchResourceEncryptionKey { vaultUri: string; /** Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. */ accessCredentials?: AzureActiveDirectoryApplicationCredentials; - /** An explicit managed identity to use for this encryption key. If not specified and the access credentials property is null, the system-assigned managed identity is used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. If "none" is specified, the value of this property is cleared. */ - identity?: SearchIndexerDataIdentityUnion; } /** Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. */ @@ -267,13 +244,6 @@ export interface ListDataSourcesResult { readonly dataSources: SearchIndexerDataSource[]; } -export interface DocumentKeysOrIds { - /** document keys to be reset */ - documentKeys?: string[]; - /** datasource document identifiers to be reset */ - datasourceDocumentIds?: string[]; -} - /** Represents an indexer. */ export interface SearchIndexer { /** The name of the indexer. */ @@ -300,8 +270,6 @@ export interface SearchIndexer { etag?: string; /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your indexer definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. 
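// --- Illustrative sketch (not part of this patch): the change/deletion
// detection policies defined above, attached to a data source through the
// public SearchIndexerClient. The connection string, table, and column names
// are placeholders.
import { AzureKeyCredential, SearchIndexerClient } from "@azure/search-documents";

async function createDataSource(): Promise<void> {
  const indexerClient = new SearchIndexerClient(
    "https://<service>.search.windows.net",
    new AzureKeyCredential("<api-key>"),
  );

  await indexerClient.createDataSourceConnection({
    name: "sql-datasource",
    type: "azuresql",
    connectionString: "<connection-string>",
    container: { name: "Hotels" },
    // Re-indexes only rows whose watermark column has advanced.
    dataChangeDetectionPolicy: {
      odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
      highWaterMarkColumnName: "LastModified",
    },
    // Removes documents whose soft-delete column matches the marker value.
    dataDeletionDetectionPolicy: {
      odatatype:
        "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
      softDeleteColumnName: "IsDeleted",
      softDeleteMarkerValue: "true",
    },
  });
}

createDataSource().catch(console.error);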
*/ encryptionKey?: SearchResourceEncryptionKey; - /** Adds caching to an enrichment pipeline to allow for incremental modification steps without having to rebuild the index every time. */ - cache?: SearchIndexerCache; } /** Represents a schedule for indexer execution. */ @@ -346,10 +314,6 @@ export interface IndexingParametersConfiguration { delimitedTextDelimiter?: string; /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */ firstLineContainsHeaders?: boolean; - /** Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. */ - markdownParsingSubmode?: MarkdownParsingSubmode; - /** Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. */ - markdownHeaderDepth?: MarkdownHeaderDepth; /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */ documentRoot?: string; /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */ @@ -384,15 +348,6 @@ export interface FieldMappingFunction { parameters?: { [propertyName: string]: any }; } -export interface SearchIndexerCache { - /** The connection string to the storage account where the cache data will be persisted. */ - storageConnectionString?: string; - /** Specifies whether incremental reprocessing is enabled. */ - enableReprocessing?: boolean; - /** The user-assigned managed identity used for connections to the enrichment cache. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - identity?: SearchIndexerDataIdentityUnion; -} - /** Response from a List Indexers request. If successful, it includes the full definitions of all indexers. */ export interface ListIndexersResult { /** @@ -433,16 +388,6 @@ export interface IndexerExecutionResult { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly status: IndexerExecutionStatus; - /** - * The outcome of this indexer execution. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly statusDetail?: IndexerExecutionStatusDetail; - /** - * All of the state that defines and dictates the indexer's current execution. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly currentState?: IndexerState; /** * The error message indicating the top-level error, if any. * NOTE: This property will not be serialized. It can only be populated by the server. @@ -490,45 +435,6 @@ export interface IndexerExecutionResult { readonly finalTrackingState?: string; } -/** Represents all of the state that defines and dictates the indexer's current execution. */ -export interface IndexerState { - /** - * The mode the indexer is running in. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly mode?: IndexingMode; - /** - * Change tracking state used when indexing starts on all documents in the datasource. 
- * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly allDocumentsInitialChangeTrackingState?: string; - /** - * Change tracking state value when indexing finishes on all documents in the datasource. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly allDocumentsFinalChangeTrackingState?: string; - /** - * Change tracking state used when indexing starts on select, reset documents in the datasource. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resetDocumentsInitialChangeTrackingState?: string; - /** - * Change tracking state value when indexing finishes on select, reset documents in the datasource. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resetDocumentsFinalChangeTrackingState?: string; - /** - * The list of document keys that have been reset. The document key is the document's unique identifier for the data in the search index. The indexer will prioritize selectively re-ingesting these keys. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resetDocumentKeys?: string[]; - /** - * The list of datasource document ids that have been reset. The datasource document id is the unique identifier for the data in the datasource. The indexer will prioritize selectively re-ingesting these ids. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly resetDatasourceDocumentIds?: string[]; -} - /** Represents an item- or document-level indexing error. */ export interface SearchIndexerError { /** @@ -651,11 +557,8 @@ export interface SearchIndexerSkill { | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" - | "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" | "#Microsoft.Skills.Custom.WebApiSkill" - | "#Microsoft.Skills.Custom.AmlSkill" - | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill" - | "#Microsoft.Skills.Vision.VectorizeSkill"; + | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"; /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */ name?: string; /** The description of the skill which describes the inputs, outputs, and usage of the skill. */ @@ -693,9 +596,7 @@ export interface CognitiveServicesAccount { /** Polymorphic discriminator, which specifies the different types this object can be */ odatatype: | "#Microsoft.Azure.Search.DefaultCognitiveServices" - | "#Microsoft.Azure.Search.CognitiveServicesByKey" - | "#Microsoft.Azure.Search.AIServicesByKey" - | "#Microsoft.Azure.Search.AIServicesByIdentity"; + | "#Microsoft.Azure.Search.CognitiveServicesByKey"; /** Description of the Azure AI service resource attached to a skillset. */ description?: string; } @@ -706,10 +607,6 @@ export interface SearchIndexerKnowledgeStore { storageConnectionString: string; /** A list of additional projections to perform during indexing. */ projections: SearchIndexerKnowledgeStoreProjection[]; - /** The user-assigned managed identity used for connections to Azure Storage when writing knowledge store projections. 
If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - identity?: SearchIndexerDataIdentityUnion; - /** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ - parameters?: SearchIndexerKnowledgeStoreParameters; } /** Container object for various projection selectors. */ @@ -736,14 +633,6 @@ export interface SearchIndexerKnowledgeStoreProjectionSelector { inputs?: InputFieldMappingEntry[]; } -/** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ -export interface SearchIndexerKnowledgeStoreParameters { - /** Describes unknown properties. The value of an unknown property can be of "any" type. */ - [property: string]: any; - /** Whether or not projections should synthesize a generated key name if one isn't already present. */ - synthesizeGeneratedKeyName?: boolean; -} - /** Definition of additional projections to secondary search indexes. */ export interface SearchIndexerIndexProjection { /** A list of projections to be performed to secondary search indexes. */ @@ -781,11 +670,6 @@ export interface ListSkillsetsResult { readonly skillsets: SearchIndexerSkillset[]; } -export interface SkillNames { - /** the names of skills to be reset. */ - skillNames?: string[]; -} - /** Represents a synonym map definition. */ export interface SynonymMap { /** The name of the synonym map. */ @@ -831,8 +715,6 @@ export interface SearchIndex { tokenFilters?: TokenFilterUnion[]; /** The character filters for the index. */ charFilters?: CharFilterUnion[]; - /** The normalizers for the index. */ - normalizers?: LexicalNormalizerUnion[]; /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ encryptionKey?: SearchResourceEncryptionKey; /** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */ @@ -871,8 +753,6 @@ export interface SearchField { searchAnalyzer?: LexicalAnalyzerName; /** The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. 
*/ indexAnalyzer?: LexicalAnalyzerName; - /** The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed for the field. Must be null for complex fields. */ - normalizer?: LexicalNormalizerName; /** The dimensionality of the vector field. */ vectorSearchDimensions?: number; /** The name of the vector search profile that specifies the algorithm and vectorizer to use when searching the vector field. */ @@ -1008,14 +888,6 @@ export interface CharFilter { name: string; } -/** Base type for normalizers. */ -export interface LexicalNormalizer { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; - /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */ - name: string; -} - /** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */ export interface Similarity { /** Polymorphic discriminator, which specifies the different types this object can be */ @@ -1090,7 +962,7 @@ export interface VectorSearchAlgorithmConfiguration { /** Specifies the vectorization method to be used during query time. */ export interface VectorSearchVectorizer { /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "azureOpenAI" | "customWebApi" | "aiServicesVision" | "aml"; + kind: "azureOpenAI" | "customWebApi"; /** The name to associate with this particular vectorization method. */ vectorizerName: string; } @@ -1105,20 +977,6 @@ export interface VectorSearchCompression { rerankWithOriginalVectors?: boolean; /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */ defaultOversampling?: number; - /** Contains the options for rescoring. */ - rescoringOptions?: RescoringOptions; - /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should be only used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */ - truncationDimension?: number; -} - -/** Contains the options for rescoring. */ -export interface RescoringOptions { - /** If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency. */ - enableRescoring?: boolean; - /** Default oversampling factor. 
Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values improve recall at the expense of latency. */ - defaultOversampling?: number; - /** Controls the storage method for original vectors. This setting is immutable. */ - rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod; } /** Response from a List Indexes request. If successful, it includes the full definitions of all indexes. */ @@ -1157,8 +1015,6 @@ export interface AnalyzeRequest { analyzer?: string; /** The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownTokenizerNames is an enum containing known values. */ tokenizer?: string; - /** The name of the normalizer to use to normalize the given text. */ - normalizer?: LexicalNormalizerName; /** An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ tokenFilters?: string[]; /** An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ @@ -1195,25 +1051,6 @@ export interface AnalyzedTokenInfo { readonly position: number; } -/** Represents an index alias, which describes a mapping from the alias name to an index. The alias name can be used in place of the index name for supported operations. */ -export interface SearchAlias { - /** The name of the alias. */ - name: string; - /** The name of the index this alias maps to. Only one index name may be specified. */ - indexes: string[]; - /** The ETag of the alias. */ - etag?: string; -} - -/** Response from a List Aliases request. If successful, it includes the associated index mappings for all aliases. */ -export interface ListAliasesResult { - /** - * The aliases in the Search service. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly aliases: SearchAlias[]; -} - /** Response from a get service statistics request. If successful, it includes service level counters and limits. */ export interface ServiceStatistics { /** Service level resource counters. */ @@ -1224,8 +1061,6 @@ export interface ServiceStatistics { /** Represents service-level resource counters and quotas. */ export interface ServiceCounters { - /** Total number of aliases. */ - aliasCounter: ResourceCounter; /** Total number of documents across all indexes in the service. */ documentCounter: ResourceCounter; /** Total number of indexes. */ @@ -1304,6 +1139,14 @@ export interface AzureOpenAIParameters { modelName?: AzureOpenAIModelName; } +/** Abstract base type for data identities. */ +export interface SearchIndexerDataIdentity { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: + | "#Microsoft.Azure.Search.DataNoneIdentity" + | "#Microsoft.Azure.Search.DataUserAssignedIdentity"; +} + /** Specifies the properties for connecting to a user-defined vectorizer. */ export interface WebApiParameters { /** The URI of the Web API providing the vectorizer. 
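// --- Illustrative sketch (not part of this patch): the retained GA
// compression knobs above (rerankWithOriginalVectors / defaultOversampling)
// in an index definition; the preview rescoringOptions and truncationDimension
// removed by this patch are not used. All names are placeholders.
import type { SearchIndex } from "@azure/search-documents";

const compressedIndex: SearchIndex = {
  name: "docs",
  fields: [
    { type: "Edm.String", name: "id", key: true },
    {
      type: "Collection(Edm.Single)",
      name: "embedding",
      searchable: true,
      vectorSearchDimensions: 1536,
      vectorSearchProfileName: "compressed-profile",
    },
  ],
  vectorSearch: {
    algorithms: [{ name: "hnsw-1", kind: "hnsw" }],
    compressions: [
      {
        kind: "scalarQuantization",
        compressionName: "sq-1",
        rerankWithOriginalVectors: true,
        defaultOversampling: 4, // fetch 4x candidates, rerank on full-precision vectors
      },
    ],
    profiles: [
      {
        name: "compressed-profile",
        algorithmConfigurationName: "hnsw-1",
        compressionName: "sq-1",
      },
    ],
  },
};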
*/ @@ -1320,34 +1163,6 @@ export interface WebApiParameters { authIdentity?: SearchIndexerDataIdentityUnion; } -/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */ -export interface AIServicesVisionParameters { - /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */ - modelVersion: string | null; - /** The resource URI of the AI Services resource. */ - resourceUri: string; - /** API key of the designated AI Services resource. */ - apiKey?: string; - /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - authIdentity?: SearchIndexerDataIdentityUnion; -} - -/** Specifies the properties for connecting to an AML vectorizer. */ -export interface AMLParameters { - /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */ - scoringUri: string | null; - /** (Required for key authentication) The key for the AML service. */ - authenticationKey?: string; - /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */ - resourceId?: string; - /** (Optional) When specified, indicates the timeout for the http client making the API call. */ - timeout?: string; - /** (Optional for token authentication). The region the AML service is deployed in. */ - region?: string; - /** The name of the embedding model from the Azure AI Foundry Catalog that is deployed at the provided endpoint. */ - modelName?: AIStudioModelCatalogName; -} - /** Provides parameter values to a distance scoring function. */ export interface DistanceScoringParameters { /** The name of the parameter passed in search queries to specify the reference location. */ @@ -1378,6 +1193,14 @@ export interface TagScoringParameters { tagsParameter: string; } +/** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */ +export interface SearchIndexerKnowledgeStoreParameters { + /** Describes unknown properties. The value of an unknown property can be of "any" type. */ + [property: string]: any; + /** Whether or not projections should synthesize a generated key name if one isn't already present. */ + synthesizeGeneratedKeyName?: boolean; +} + /** An object that contains information about the matches that were found, and related metadata. */ export interface CustomEntity { /** The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the "normalized" form of the text being found. */ @@ -1418,29 +1241,6 @@ export interface CustomEntityAlias { fuzzyEditDistance?: number; } -export interface AzureOpenAITokenizerParameters { - /** Only applies if the unit is set to azureOpenAITokens. Options include 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. 
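// --- Illustrative sketch (not part of this patch): wiring the
// DistanceScoringParameters / TagScoringParameters shapes above into a
// scoring profile. Field and parameter names are placeholders.
import type { ScoringProfile } from "@azure/search-documents";

const geoAndTagsProfile: ScoringProfile = {
  name: "geo-and-tags",
  functions: [
    {
      type: "distance",
      fieldName: "location",
      boost: 2,
      parameters: {
        referencePointParameter: "currentLocation", // supplied via scoringParameters at query time
        boostingDistance: 10, // km from the reference point where boosting tapers off
      },
    },
    {
      type: "tag",
      fieldName: "tags",
      boost: 1.5,
      parameters: { tagsParameter: "favorites" },
    },
  ],
};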
*/ - encoderModelName?: SplitSkillEncoderModelName; - /** (Optional) Only applies if the unit is set to azureOpenAITokens. This parameter defines a collection of special tokens that are permitted within the tokenization process. */ - allowedSpecialTokens?: string[]; -} - -/** Clears the identity property of a datasource. */ -export interface SearchIndexerDataNoneIdentity - extends SearchIndexerDataIdentity { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Azure.Search.DataNoneIdentity"; -} - -/** Specifies the identity for a datasource to use. */ -export interface SearchIndexerDataUserAssignedIdentity - extends SearchIndexerDataIdentity { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity"; - /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. */ - resourceId: string; -} - /** Defines a data change detection policy that captures changes based on the value of a high water mark column. */ export interface HighWaterMarkChangeDetectionPolicy extends DataChangeDetectionPolicy { @@ -1468,13 +1268,6 @@ export interface SoftDeleteColumnDeletionDetectionPolicy softDeleteMarkerValue?: string; } -/** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */ -export interface NativeBlobSoftDeleteDeletionDetectionPolicy - extends DataDeletionDetectionPolicy { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"; -} - /** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */ export interface ConditionalSkill extends SearchIndexerSkill { /** Polymorphic discriminator, which specifies the different types this object can be */ @@ -1645,10 +1438,6 @@ export interface SplitSkill extends SearchIndexerSkill { pageOverlapLength?: number; /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */ maximumPagesToTake?: number; - /** Only applies if textSplitMode is set to pages. There are two possible values. The choice of the values will decide the length (maximumPageLength and pageOverlapLength) measurement. The default is 'characters', which means the length will be measured by character. */ - unit?: SplitSkillUnit; - /** Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. */ - azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters; } /** A skill looks for text from a custom, user-defined list of words and phrases. 
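// --- Illustrative sketch (not part of this patch): the GA SplitSkill shape
// above; the preview `unit` and azureOpenAITokenizerParameters removed by
// this patch are not used. Paths and names are placeholders.
import type { SplitSkill } from "@azure/search-documents";

const chunkingSkill: SplitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  textSplitMode: "pages",
  maximumPageLength: 2000, // characters per chunk
  pageOverlapLength: 200, // characters shared between adjacent chunks
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "chunks" }],
};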
*/ @@ -1693,54 +1482,14 @@ export interface DocumentExtractionSkill extends SearchIndexerSkill { configuration?: { [propertyName: string]: any }; } -/** A skill that extracts content and layout information (as markdown), via Azure AI Services, from files within the enrichment pipeline. */ -export interface DocumentIntelligenceLayoutSkill extends SearchIndexerSkill { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill"; - /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */ - outputMode?: DocumentIntelligenceLayoutSkillOutputMode; - /** The depth of headers in the markdown output. Default is h6. */ - markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth; -} - /** A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. */ -export interface WebApiSkill extends SearchIndexerSkill { +export interface WebApiSkill extends SearchIndexerSkill, WebApiParameters { /** Polymorphic discriminator, which specifies the different types this object can be */ odatatype: "#Microsoft.Skills.Custom.WebApiSkill"; - /** The url for the Web API. */ - uri: string; - /** The headers required to make the http request. */ - httpHeaders?: { [propertyName: string]: string }; - /** The method for the http request. */ - httpMethod?: string; - /** The desired timeout for the request. Default is 30 seconds. */ - timeout?: string; /** The desired batch size which indicates number of documents. */ batchSize?: number; /** If set, the number of parallel calls that can be made to the Web API. */ degreeOfParallelism?: number; - /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the custom skill connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */ - authResourceId?: string; - /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - authIdentity?: SearchIndexerDataIdentityUnion; -} - -/** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */ -export interface AzureMachineLearningSkill extends SearchIndexerSkill { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Skills.Custom.AmlSkill"; - /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */ - scoringUri?: string; - /** (Required for key authentication) The key for the AML service. */ - authenticationKey?: string; - /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. 
It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */ - resourceId?: string; - /** (Optional) When specified, indicates the timeout for the http client making the API call. */ - timeout?: string; - /** (Optional for token authentication). The region the AML service is deployed in. */ - region?: string; - /** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */ - degreeOfParallelism?: number; } /** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */ @@ -1753,14 +1502,6 @@ export interface AzureOpenAIEmbeddingSkill dimensions?: number; } -/** Allows you to generate a vector embedding for a given image or text input using the Azure AI Services Vision Vectorize API. */ -export interface VisionVectorizeSkill extends SearchIndexerSkill { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Skills.Vision.VectorizeSkill"; - /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */ - modelVersion: string | null; -} - /** An empty object that represents the default Azure AI service resource for a skillset. */ export interface DefaultCognitiveServicesAccount extends CognitiveServicesAccount { @@ -1776,26 +1517,6 @@ export interface CognitiveServicesAccountKey extends CognitiveServicesAccount { key: string; } -/** The account key of an Azure AI service resource that's attached to a skillset, to be used with the resource's subdomain. */ -export interface AIServicesAccountKey extends CognitiveServicesAccount { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Azure.Search.AIServicesByKey"; - /** The key used to provision the Azure AI service resource attached to a skillset. */ - key: string; - /** The subdomain url for the corresponding AI Service. */ - subdomainUrl: string; -} - -/** The multi-region account of an Azure AI service resource that's attached to a skillset. */ -export interface AIServicesAccountIdentity extends CognitiveServicesAccount { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Azure.Search.AIServicesByIdentity"; - /** The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - identity: SearchIndexerDataIdentityUnion | null; - /** The subdomain url for the corresponding AI Service. */ - subdomainUrl: string; -} - /** Description for what data to store in Azure Tables. 
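// --- Illustrative sketch (not part of this patch): the retained
// AzureOpenAIEmbeddingSkill above inside a skillset definition. The Azure
// OpenAI endpoint and deployment are placeholders; `deploymentId` is the
// public-surface property name for the deployment.
import type { SearchIndexerSkillset } from "@azure/search-documents";

const embeddingSkillset: SearchIndexerSkillset = {
  name: "embedding-skillset",
  skills: [
    {
      odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill",
      resourceUrl: "https://<aoai>.openai.azure.com",
      deploymentId: "text-embedding-3-large",
      modelName: "text-embedding-3-large",
      dimensions: 1024,
      inputs: [{ name: "text", source: "/document/content" }],
      outputs: [{ name: "embedding", targetName: "embedding" }],
    },
  ],
};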
*/ export interface SearchIndexerKnowledgeStoreTableProjectionSelector extends SearchIndexerKnowledgeStoreProjectionSelector { @@ -2306,16 +2027,6 @@ export interface PatternReplaceCharFilter extends CharFilter { replacement: string; } -/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored. */ -export interface CustomNormalizer extends LexicalNormalizer { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; - /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */ - tokenFilters?: TokenFilterName[]; - /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */ - charFilters?: CharFilterName[]; -} - /** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. */ export interface ClassicSimilarity extends Similarity { /** Polymorphic discriminator, which specifies the different types this object can be */ @@ -2366,22 +2077,6 @@ export interface WebApiVectorizer extends VectorSearchVectorizer { parameters?: WebApiParameters; } -/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */ -export interface AIServicesVisionVectorizer extends VectorSearchVectorizer { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "aiServicesVision"; - /** Contains the parameters specific to AI Services Vision embedding vectorization. */ - aIServicesVisionParameters?: AIServicesVisionParameters; -} - -/** Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog for generating the vector embedding of a query string. */ -export interface AMLVectorizer extends VectorSearchVectorizer { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "aml"; - /** Specifies the properties of the AML vectorizer. */ - aMLParameters?: AMLParameters; -} - /** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */ export interface ScalarQuantizationCompression extends VectorSearchCompression { /** Polymorphic discriminator, which specifies the different types this object can be */ @@ -2396,6 +2091,22 @@ export interface BinaryQuantizationCompression extends VectorSearchCompression { kind: "binaryQuantization"; } +/** Clears the identity property of a datasource. */ +export interface SearchIndexerDataNoneIdentity + extends SearchIndexerDataIdentity { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.DataNoneIdentity"; +} + +/** Specifies the identity for a datasource to use. 
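// --- Illustrative sketch (not part of this patch): the retained GA
// vectorizer kinds above ("azureOpenAI" / "customWebApi") bound to a vector
// search profile; the preview aiServicesVision/aml vectorizers removed by
// this patch are not used. Endpoints and names are placeholders.
import type { VectorSearch } from "@azure/search-documents";

const vectorSearch: VectorSearch = {
  algorithms: [{ name: "hnsw-1", kind: "hnsw" }],
  vectorizers: [
    {
      kind: "azureOpenAI",
      vectorizerName: "aoai-1",
      parameters: {
        resourceUrl: "https://<aoai>.openai.azure.com",
        deploymentId: "text-embedding-3-large",
        modelName: "text-embedding-3-large",
      },
    },
  ],
  profiles: [
    {
      name: "vector-profile",
      algorithmConfigurationName: "hnsw-1",
      vectorizerName: "aoai-1",
    },
  ],
};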
*/ +export interface SearchIndexerDataUserAssignedIdentity + extends SearchIndexerDataIdentity { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity"; + /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. */ + resourceId: string; +} + /** Projection definition for what data to store in Azure Blob. */ export interface SearchIndexerKnowledgeStoreObjectProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector {} @@ -2404,20 +2115,20 @@ export interface SearchIndexerKnowledgeStoreObjectProjectionSelector export interface SearchIndexerKnowledgeStoreFileProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector {} -/** Known values of {@link ApiVersion20241101Preview} that the service accepts. */ -export enum KnownApiVersion20241101Preview { - /** Api Version '2024-11-01-preview' */ - TwoThousandTwentyFour1101Preview = "2024-11-01-preview", +/** Known values of {@link ApiVersion20240701} that the service accepts. */ +export enum KnownApiVersion20240701 { + /** Api Version '2024-07-01' */ + TwoThousandTwentyFour0701 = "2024-07-01", } /** - * Defines values for ApiVersion20241101Preview. \ - * {@link KnownApiVersion20241101Preview} can be used interchangeably with ApiVersion20241101Preview, + * Defines values for ApiVersion20240701. \ + * {@link KnownApiVersion20240701} can be used interchangeably with ApiVersion20240701, * this enum contains the known values that the service supports. * ### Known values supported by the service - * **2024-11-01-preview**: Api Version '2024-11-01-preview' + * **2024-07-01**: Api Version '2024-07-01' */ -export type ApiVersion20241101Preview = string; +export type ApiVersion20240701 = string; /** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */ export enum KnownSearchIndexerDataSourceType { @@ -2433,8 +2144,6 @@ export enum KnownSearchIndexerDataSourceType { MySql = "mysql", /** Indicates an ADLS Gen2 datasource. */ AdlsGen2 = "adlsgen2", - /** Indicates a Microsoft Fabric OneLake datasource. */ - OneLake = "onelake", } /** @@ -2447,8 +2156,7 @@ export enum KnownSearchIndexerDataSourceType { * **azureblob**: Indicates an Azure Blob datasource. \ * **azuretable**: Indicates an Azure Table datasource. \ * **mysql**: Indicates a MySql datasource. \ - * **adlsgen2**: Indicates an ADLS Gen2 datasource. \ - * **onelake**: Indicates a Microsoft Fabric OneLake datasource. + * **adlsgen2**: Indicates an ADLS Gen2 datasource. */ export type SearchIndexerDataSourceType = string; @@ -2466,8 +2174,6 @@ export enum KnownBlobIndexerParsingMode { JsonArray = "jsonArray", /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */ JsonLines = "jsonLines", - /** Set to markdown to extract content from markdown files. */ - Markdown = "markdown", } /** @@ -2480,59 +2186,10 @@ export enum KnownBlobIndexerParsingMode { * **delimitedText**: Set to delimitedText when blobs are plain CSV files. \ * **json**: Set to json to extract structured content from JSON files. \ * **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents. 
\ - * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. \ - * **markdown**: Set to markdown to extract content from markdown files. + * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */ export type BlobIndexerParsingMode = string; -/** Known values of {@link MarkdownParsingSubmode} that the service accepts. */ -export enum KnownMarkdownParsingSubmode { - /** Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. */ - OneToMany = "oneToMany", - /** Indicates that each markdown file will be parsed into a single search document. */ - OneToOne = "oneToOne", -} - -/** - * Defines values for MarkdownParsingSubmode. \ - * {@link KnownMarkdownParsingSubmode} can be used interchangeably with MarkdownParsingSubmode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **oneToMany**: Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. \ - * **oneToOne**: Indicates that each markdown file will be parsed into a single search document. - */ -export type MarkdownParsingSubmode = string; - -/** Known values of {@link MarkdownHeaderDepth} that the service accepts. */ -export enum KnownMarkdownHeaderDepth { - /** Indicates that headers up to a level of h1 will be considered while grouping markdown content. */ - H1 = "h1", - /** Indicates that headers up to a level of h2 will be considered while grouping markdown content. */ - H2 = "h2", - /** Indicates that headers up to a level of h3 will be considered while grouping markdown content. */ - H3 = "h3", - /** Indicates that headers up to a level of h4 will be considered while grouping markdown content. */ - H4 = "h4", - /** Indicates that headers up to a level of h5 will be considered while grouping markdown content. */ - H5 = "h5", - /** Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. */ - H6 = "h6", -} - -/** - * Defines values for MarkdownHeaderDepth. \ - * {@link KnownMarkdownHeaderDepth} can be used interchangeably with MarkdownHeaderDepth, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **h1**: Indicates that headers up to a level of h1 will be considered while grouping markdown content. \ - * **h2**: Indicates that headers up to a level of h2 will be considered while grouping markdown content. \ - * **h3**: Indicates that headers up to a level of h3 will be considered while grouping markdown content. \ - * **h4**: Indicates that headers up to a level of h4 will be considered while grouping markdown content. \ - * **h5**: Indicates that headers up to a level of h5 will be considered while grouping markdown content. \ - * **h6**: Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. - */ -export type MarkdownHeaderDepth = string; - /** Known values of {@link BlobIndexerDataToExtract} that the service accepts. 
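 *
 * A minimal sketch of how these extensible enums are consumed (illustrative
 * only): because the modeled type is `string`, either the raw service value
 * or the corresponding `Known*` enum member can be assigned.
 *
 *   const dataToExtract: BlobIndexerDataToExtract = "storageMetadata";
 *   // equivalent, via the enum member (member name assumed from the
 *   // "storageMetadata" value documented below):
 *   const sameValue: BlobIndexerDataToExtract =
 *     KnownBlobIndexerDataToExtract.StorageMetadata;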
*/ export enum KnownBlobIndexerDataToExtract { /** Indexes just the standard blob properties and user-specified metadata. */ @@ -2611,39 +2268,6 @@ export enum KnownIndexerExecutionEnvironment { */ export type IndexerExecutionEnvironment = string; -/** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */ -export enum KnownIndexerExecutionStatusDetail { - /** Indicates that the reset that occurred was for a call to ResetDocs. */ - ResetDocs = "resetDocs", -} - -/** - * Defines values for IndexerExecutionStatusDetail. \ - * {@link KnownIndexerExecutionStatusDetail} can be used interchangeably with IndexerExecutionStatusDetail, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **resetDocs**: Indicates that the reset that occurred was for a call to ResetDocs. - */ -export type IndexerExecutionStatusDetail = string; - -/** Known values of {@link IndexingMode} that the service accepts. */ -export enum KnownIndexingMode { - /** The indexer is indexing all documents in the datasource. */ - IndexingAllDocs = "indexingAllDocs", - /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */ - IndexingResetDocs = "indexingResetDocs", -} - -/** - * Defines values for IndexingMode. \ - * {@link KnownIndexingMode} can be used interchangeably with IndexingMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **indexingAllDocs**: The indexer is indexing all documents in the datasource. \ - * **indexingResetDocs**: The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. - */ -export type IndexingMode = string; - /** Known values of {@link IndexProjectionMode} that the service accepts. */ export enum KnownIndexProjectionMode { /** The source document will be skipped from writing into the indexer's target index. */ @@ -3004,33 +2628,6 @@ export enum KnownLexicalAnalyzerName { */ export type LexicalAnalyzerName = string; -/** Known values of {@link LexicalNormalizerName} that the service accepts. */ -export enum KnownLexicalNormalizerName { - /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */ - AsciiFolding = "asciifolding", - /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */ - Elision = "elision", - /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */ - Lowercase = "lowercase", - /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */ - Standard = "standard", - /** Normalizes token text to uppercase. 
See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */ - Uppercase = "uppercase", -} - -/** - * Defines values for LexicalNormalizerName. \ - * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \ - * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \ - * **lowercase**: Normalizes token text to lowercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \ - * **standard**: Standard normalizer, which consists of lowercase and asciifolding. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \ - * **uppercase**: Normalizes token text to uppercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html - */ -export type LexicalNormalizerName = string; - /** Known values of {@link VectorEncodingFormat} that the service accepts. */ export enum KnownVectorEncodingFormat { /** Encoding format representing bits packed into a wider data type. */ @@ -3070,10 +2667,6 @@ export enum KnownVectorSearchVectorizerKind { AzureOpenAI = "azureOpenAI", /** Generate embeddings using a custom web endpoint at query time. */ CustomWebApi = "customWebApi", - /** Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. */ - AIServicesVision = "aiServicesVision", - /** Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog at query time. */ - AML = "aml", } /** @@ -3082,9 +2675,7 @@ export enum KnownVectorSearchVectorizerKind { * this enum contains the known values that the service supports. * ### Known values supported by the service * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \ - * **customWebApi**: Generate embeddings using a custom web endpoint at query time. \ - * **aiServicesVision**: Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. \ - * **aml**: Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog at query time. + * **customWebApi**: Generate embeddings using a custom web endpoint at query time. */ export type VectorSearchVectorizerKind = string; @@ -3106,176 +2697,29 @@ export enum KnownVectorSearchCompressionKind { */ export type VectorSearchCompressionKind = string; -/** Known values of {@link VectorSearchCompressionRescoreStorageMethod} that the service accepts. */ -export enum KnownVectorSearchCompressionRescoreStorageMethod { - /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. 
This consumes more storage but allows for rescoring and oversampling. */ - PreserveOriginals = "preserveOriginals", - /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */ - DiscardOriginals = "discardOriginals", +/** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */ +export enum KnownVectorSearchAlgorithmMetric { + /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */ + Cosine = "cosine", + /** Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. */ + Euclidean = "euclidean", + /** Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. */ + DotProduct = "dotProduct", + /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */ + Hamming = "hamming", } /** - * Defines values for VectorSearchCompressionRescoreStorageMethod. \ - * {@link KnownVectorSearchCompressionRescoreStorageMethod} can be used interchangeably with VectorSearchCompressionRescoreStorageMethod, + * Defines values for VectorSearchAlgorithmMetric. \ + * {@link KnownVectorSearchAlgorithmMetric} can be used interchangeably with VectorSearchAlgorithmMetric, * this enum contains the known values that the service supports. * ### Known values supported by the service - * **preserveOriginals**: This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. \ - * **discardOriginals**: This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. + * **cosine**: Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. \ + * **euclidean**: Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. \ + * **dotProduct**: Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. \ + * **hamming**: Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */ -export type VectorSearchCompressionRescoreStorageMethod = string; - -/** Known values of {@link TokenFilterName} that the service accepts. */ -export enum KnownTokenFilterName { - /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */ - ArabicNormalization = "arabic_normalization", - /** Strips all characters after an apostrophe (including the apostrophe itself). 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */ - Apostrophe = "apostrophe", - /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */ - AsciiFolding = "asciifolding", - /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */ - CjkBigram = "cjk_bigram", - /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */ - CjkWidth = "cjk_width", - /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */ - Classic = "classic", - /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */ - CommonGram = "common_grams", - /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */ - EdgeNGram = "edgeNGram_v2", - /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */ - Elision = "elision", - /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */ - GermanNormalization = "german_normalization", - /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */ - HindiNormalization = "hindi_normalization", - /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */ - IndicNormalization = "indic_normalization", - /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */ - KeywordRepeat = "keyword_repeat", - /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */ - KStem = "kstem", - /** Removes words that are too long or too short. 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */ - Length = "length", - /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */ - Limit = "limit", - /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */ - Lowercase = "lowercase", - /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */ - NGram = "nGram_v2", - /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */ - PersianNormalization = "persian_normalization", - /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */ - Phonetic = "phonetic", - /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */ - PorterStem = "porter_stem", - /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */ - Reverse = "reverse", - /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */ - ScandinavianNormalization = "scandinavian_normalization", - /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */ - ScandinavianFoldingNormalization = "scandinavian_folding", - /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */ - Shingle = "shingle", - /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */ - Snowball = "snowball", - /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */ - SoraniNormalization = "sorani_normalization", - /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */ - Stemmer = "stemmer", - /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */ - Stopwords = "stopwords", - /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */ - Trim = "trim", - /** Truncates the terms to a specific length. 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */ - Truncate = "truncate", - /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */ - Unique = "unique", - /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */ - Uppercase = "uppercase", - /** Splits words into subwords and performs optional transformations on subword groups. */ - WordDelimiter = "word_delimiter", -} - -/** - * Defines values for TokenFilterName. \ - * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html \ - * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html \ - * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \ - * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html \ - * **cjk_width**: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html \ - * **classic**: Removes English possessives, and dots from acronyms. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html \ - * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html \ - * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html \ - * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \ - * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html \ - * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html \ - * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html \ - * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html \ - * **kstem**: A high-performance kstem filter for English. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html \ - * **length**: Removes words that are too long or too short. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html \ - * **limit**: Limits the number of tokens while indexing. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html \ - * **lowercase**: Normalizes token text to lower case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \ - * **nGram_v2**: Generates n-grams of the given size(s). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html \ - * **persian_normalization**: Applies normalization for Persian. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html \ - * **phonetic**: Create tokens for phonetic matches. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html \ - * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http:\/\/tartarus.org\/~martin\/PorterStemmer \ - * **reverse**: Reverses the token string. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \ - * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html \ - * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html \ - * **shingle**: Creates combinations of tokens as a single token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html \ - * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html \ - * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html \ - * **stemmer**: Language specific stemming filter. See https:\/\/learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters \ - * **stopwords**: Removes stop words from a token stream. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html \ - * **trim**: Trims leading and trailing whitespace from tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html \ - * **truncate**: Truncates the terms to a specific length. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html \ - * **unique**: Filters out tokens with same text as the previous token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html \ - * **uppercase**: Normalizes token text to upper case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html \ - * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups. - */ -export type TokenFilterName = string; - -/** Known values of {@link CharFilterName} that the service accepts. */ -export enum KnownCharFilterName { - /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */ - HtmlStrip = "html_strip", -} - -/** - * Defines values for CharFilterName. \ - * {@link KnownCharFilterName} can be used interchangeably with CharFilterName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **html_strip**: A character filter that attempts to strip out HTML constructs. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html - */ -export type CharFilterName = string; - -/** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */ -export enum KnownVectorSearchAlgorithmMetric { - /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */ - Cosine = "cosine", - /** Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. */ - Euclidean = "euclidean", - /** Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. */ - DotProduct = "dotProduct", - /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */ - Hamming = "hamming", -} - -/** - * Defines values for VectorSearchAlgorithmMetric. \ - * {@link KnownVectorSearchAlgorithmMetric} can be used interchangeably with VectorSearchAlgorithmMetric, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **cosine**: Measures the angle between vectors to quantify their similarity, disregarding magnitude. 
The smaller the angle, the closer the similarity. \ - * **euclidean**: Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. \ - * **dotProduct**: Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. \ - * **hamming**: Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. - */ -export type VectorSearchAlgorithmMetric = string; +export type VectorSearchAlgorithmMetric = string; /** Known values of {@link VectorSearchCompressionTarget} that the service accepts. */ export enum KnownVectorSearchCompressionTarget { @@ -3313,36 +2757,6 @@ export enum KnownAzureOpenAIModelName { */ export type AzureOpenAIModelName = string; -/** Known values of {@link AIStudioModelCatalogName} that the service accepts. */ -export enum KnownAIStudioModelCatalogName { - /** OpenAIClipImageTextEmbeddingsVitBasePatch32 */ - OpenAIClipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", - /** OpenAIClipImageTextEmbeddingsViTLargePatch14336 */ - OpenAIClipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336", - /** FacebookDinoV2ImageEmbeddingsViTBase */ - FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base", - /** FacebookDinoV2ImageEmbeddingsViTGiant */ - FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant", - /** CohereEmbedV3English */ - CohereEmbedV3English = "Cohere-embed-v3-english", - /** CohereEmbedV3Multilingual */ - CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual", -} - -/** - * Defines values for AIStudioModelCatalogName. \ - * {@link KnownAIStudioModelCatalogName} can be used interchangeably with AIStudioModelCatalogName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32** \ - * **OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336** \ - * **Facebook-DinoV2-Image-Embeddings-ViT-Base** \ - * **Facebook-DinoV2-Image-Embeddings-ViT-Giant** \ - * **Cohere-embed-v3-english** \ - * **Cohere-embed-v3-multilingual** - */ -export type AIStudioModelCatalogName = string; - /** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */ export enum KnownKeyPhraseExtractionSkillLanguage { /** Danish */ @@ -4486,48 +3900,6 @@ export enum KnownTextSplitMode { */ export type TextSplitMode = string; -/** Known values of {@link SplitSkillUnit} that the service accepts. */ -export enum KnownSplitSkillUnit { - /** The length will be measured by character. */ - Characters = "characters", - /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */ - AzureOpenAITokens = "azureOpenAITokens", -} - -/** - * Defines values for SplitSkillUnit. \ - * {@link KnownSplitSkillUnit} can be used interchangeably with SplitSkillUnit, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **characters**: The length will be measured by character. \ - * **azureOpenAITokens**: The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. 
- */ -export type SplitSkillUnit = string; - -/** Known values of {@link SplitSkillEncoderModelName} that the service accepts. */ -export enum KnownSplitSkillEncoderModelName { - /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */ - R50KBase = "r50k_base", - /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */ - P50KBase = "p50k_base", - /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */ - P50KEdit = "p50k_edit", - /** A base model with a 100,000 token vocabulary. */ - CL100KBase = "cl100k_base", -} - -/** - * Defines values for SplitSkillEncoderModelName. \ - * {@link KnownSplitSkillEncoderModelName} can be used interchangeably with SplitSkillEncoderModelName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **r50k_base**: Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. \ - * **p50k_base**: A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. \ - * **p50k_edit**: Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. \ - * **cl100k_base**: A base model with a 100,000 token vocabulary. - */ -export type SplitSkillEncoderModelName = string; - /** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */ export enum KnownCustomEntityLookupSkillLanguage { /** Danish */ @@ -4795,51 +4167,6 @@ export enum KnownTextTranslationSkillLanguage { */ export type TextTranslationSkillLanguage = string; -/** Known values of {@link DocumentIntelligenceLayoutSkillOutputMode} that the service accepts. */ -export enum KnownDocumentIntelligenceLayoutSkillOutputMode { - /** Specify the deepest markdown header section to parse. */ - OneToMany = "oneToMany", -} - -/** - * Defines values for DocumentIntelligenceLayoutSkillOutputMode. \ - * {@link KnownDocumentIntelligenceLayoutSkillOutputMode} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputMode, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **oneToMany**: Specify the deepest markdown header section to parse. - */ -export type DocumentIntelligenceLayoutSkillOutputMode = string; - -/** Known values of {@link DocumentIntelligenceLayoutSkillMarkdownHeaderDepth} that the service accepts. */ -export enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth { - /** Header level 1. */ - H1 = "h1", - /** Header level 2. */ - H2 = "h2", - /** Header level 3. */ - H3 = "h3", - /** Header level 4. */ - H4 = "h4", - /** Header level 5. */ - H5 = "h5", - /** Header level 6. */ - H6 = "h6", -} - -/** - * Defines values for DocumentIntelligenceLayoutSkillMarkdownHeaderDepth. \ - * {@link KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth} can be used interchangeably with DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **h1**: Header level 1. \ - * **h2**: Header level 2. \ - * **h3**: Header level 3. \ - * **h4**: Header level 4. \ - * **h5**: Header level 5. \ - * **h6**: Header level 6. - */ -export type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string; - /** Known values of {@link LexicalTokenizerName} that the service accepts. 
*/ export enum KnownLexicalTokenizerName { /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */ @@ -4891,6 +4218,135 @@ export enum KnownLexicalTokenizerName { */ export type LexicalTokenizerName = string; +/** Known values of {@link TokenFilterName} that the service accepts. */ +export enum KnownTokenFilterName { + /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */ + ArabicNormalization = "arabic_normalization", + /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */ + Apostrophe = "apostrophe", + /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */ + AsciiFolding = "asciifolding", + /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */ + CjkBigram = "cjk_bigram", + /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */ + CjkWidth = "cjk_width", + /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */ + Classic = "classic", + /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */ + CommonGram = "common_grams", + /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */ + EdgeNGram = "edgeNGram_v2", + /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */ + Elision = "elision", + /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */ + GermanNormalization = "german_normalization", + /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */ + HindiNormalization = "hindi_normalization", + /** Normalizes the Unicode representation of text in Indian languages. 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */ + IndicNormalization = "indic_normalization", + /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */ + KeywordRepeat = "keyword_repeat", + /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */ + KStem = "kstem", + /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */ + Length = "length", + /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */ + Limit = "limit", + /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */ + Lowercase = "lowercase", + /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */ + NGram = "nGram_v2", + /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */ + PersianNormalization = "persian_normalization", + /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */ + Phonetic = "phonetic", + /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */ + PorterStem = "porter_stem", + /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */ + Reverse = "reverse", + /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */ + ScandinavianNormalization = "scandinavian_normalization", + /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */ + ScandinavianFoldingNormalization = "scandinavian_folding", + /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */ + Shingle = "shingle", + /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */ + Snowball = "snowball", + /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */ + SoraniNormalization = "sorani_normalization", + /** Language specific stemming filter. 
See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */ + Stemmer = "stemmer", + /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */ + Stopwords = "stopwords", + /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */ + Trim = "trim", + /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */ + Truncate = "truncate", + /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */ + Unique = "unique", + /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */ + Uppercase = "uppercase", + /** Splits words into subwords and performs optional transformations on subword groups. */ + WordDelimiter = "word_delimiter", +} + +/** + * Defines values for TokenFilterName. \ + * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html \ + * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html \ + * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \ + * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html \ + * **cjk_width**: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html \ + * **classic**: Removes English possessives, and dots from acronyms. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html \ + * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html \ + * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html \ + * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \ + * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html \ + * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html \ + * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html \ + * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html \ + * **kstem**: A high-performance kstem filter for English. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html \ + * **length**: Removes words that are too long or too short. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html \ + * **limit**: Limits the number of tokens while indexing. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html \ + * **lowercase**: Normalizes token text to lower case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \ + * **nGram_v2**: Generates n-grams of the given size(s). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html \ + * **persian_normalization**: Applies normalization for Persian. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html \ + * **phonetic**: Create tokens for phonetic matches. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html \ + * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http:\/\/tartarus.org\/~martin\/PorterStemmer \ + * **reverse**: Reverses the token string. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \ + * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html \ + * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html \ + * **shingle**: Creates combinations of tokens as a single token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html \ + * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html \ + * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html \ + * **stemmer**: Language specific stemming filter. See https:\/\/learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters \ + * **stopwords**: Removes stop words from a token stream. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html \ + * **trim**: Trims leading and trailing whitespace from tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html \ + * **truncate**: Truncates the terms to a specific length. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html \ + * **unique**: Filters out tokens with same text as the previous token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html \ + * **uppercase**: Normalizes token text to upper case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html \ + * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups. + */ +export type TokenFilterName = string; + +/** Known values of {@link CharFilterName} that the service accepts. */ +export enum KnownCharFilterName { + /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */ + HtmlStrip = "html_strip", +} + +/** + * Defines values for CharFilterName. \ + * {@link KnownCharFilterName} can be used interchangeably with CharFilterName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **html_strip**: A character filter that attempts to strip out HTML constructs. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html + */ +export type CharFilterName = string; + /** Known values of {@link RegexFlags} that the service accepts. */ export enum KnownRegexFlags { /** Enables canonical equivalence. */ @@ -5187,8 +4643,6 @@ export interface DataSourcesCreateOrUpdateOptionalParams ifMatch?: string; /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ ifNoneMatch?: string; - /** Ignores cache reset requirements. */ - skipIndexerResetRequirementForCache?: boolean; } /** Contains response data for the createOrUpdate operation. 
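 *
 * A minimal sketch of the optimistic-concurrency flow these options enable
 * (illustrative only; `existingDataSource` is a hypothetical previously
 * fetched resource, and an `etag` property is assumed on the model, which is
 * not shown in this hunk):
 *
 *   const options: DataSourcesCreateOrUpdateOptionalParams = {
 *     // perform the update only if the resource is unchanged on the server
 *     ifMatch: existingDataSource.etag,
 *   };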
*/ @@ -5231,14 +4685,6 @@ export type DataSourcesCreateResponse = SearchIndexerDataSource; export interface IndexersResetOptionalParams extends coreClient.OperationOptions {} -/** Optional parameters. */ -export interface IndexersResetDocsOptionalParams - extends coreClient.OperationOptions { - keysOrIds?: DocumentKeysOrIds; - /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */ - overwrite?: boolean; -} - /** Optional parameters. */ export interface IndexersRunOptionalParams extends coreClient.OperationOptions {} @@ -5250,10 +4696,6 @@ export interface IndexersCreateOrUpdateOptionalParams ifMatch?: string; /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ ifNoneMatch?: string; - /** Ignores cache reset requirements. */ - skipIndexerResetRequirementForCache?: boolean; - /** Disables cache reprocessing change detection. */ - disableCacheReprocessingChangeDetection?: boolean; } /** Contains response data for the createOrUpdate operation. */ @@ -5306,10 +4748,6 @@ export interface SkillsetsCreateOrUpdateOptionalParams ifMatch?: string; /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ ifNoneMatch?: string; - /** Ignores cache reset requirements. */ - skipIndexerResetRequirementForCache?: boolean; - /** Disables cache reprocessing change detection. */ - disableCacheReprocessingChangeDetection?: boolean; } /** Contains response data for the createOrUpdate operation. */ @@ -5348,10 +4786,6 @@ export interface SkillsetsCreateOptionalParams /** Contains response data for the create operation. */ export type SkillsetsCreateResponse = SearchIndexerSkillset; -/** Optional parameters. */ -export interface SkillsetsResetSkillsOptionalParams - extends coreClient.OperationOptions {} - /** Optional parameters. */ export interface SynonymMapsCreateOrUpdateOptionalParams extends coreClient.OperationOptions { @@ -5456,47 +4890,6 @@ export interface IndexesAnalyzeOptionalParams /** Contains response data for the analyze operation. */ export type IndexesAnalyzeResponse = AnalyzeResult; -/** Optional parameters. */ -export interface AliasesCreateOptionalParams - extends coreClient.OperationOptions {} - -/** Contains response data for the create operation. */ -export type AliasesCreateResponse = SearchAlias; - -/** Optional parameters. */ -export interface AliasesListOptionalParams - extends coreClient.OperationOptions {} - -/** Contains response data for the list operation. */ -export type AliasesListResponse = ListAliasesResult; - -/** Optional parameters. */ -export interface AliasesCreateOrUpdateOptionalParams - extends coreClient.OperationOptions { - /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */ - ifMatch?: string; - /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ - ifNoneMatch?: string; -} - -/** Contains response data for the createOrUpdate operation. */ -export type AliasesCreateOrUpdateResponse = SearchAlias; - -/** Optional parameters. */ -export interface AliasesDeleteOptionalParams - extends coreClient.OperationOptions { - /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. 
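The ifMatch/ifNoneMatch options that recur in these createOrUpdate parameter interfaces implement optimistic concurrency over ETags. The hand-written clients in this package surface the same flow as onlyIfUnchanged; a minimal sketch (resource names are hypothetical):

import { AzureKeyCredential, SearchIndexerClient } from "@azure/search-documents";

const endpoint = "https://<service-name>.search.windows.net"; // hypothetical
const client = new SearchIndexerClient(endpoint, new AzureKeyCredential("<admin-key>"));

async function updateWithoutClobbering(): Promise<void> {
  const dataSource = await client.getDataSourceConnection("hotels-ds"); // hypothetical name
  dataSource.description = "nightly sync of the hotels database";
  // onlyIfUnchanged sends the fetched etag as If-Match, so a concurrent
  // writer causes a 412 (Precondition Failed) instead of a silent overwrite.
  await client.createOrUpdateDataSourceConnection(dataSource, { onlyIfUnchanged: true });
}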
*/ - ifMatch?: string; - /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */ - ifNoneMatch?: string; -} - -/** Optional parameters. */ -export interface AliasesGetOptionalParams extends coreClient.OperationOptions {} - -/** Contains response data for the get operation. */ -export type AliasesGetResponse = SearchAlias; - /** Optional parameters. */ export interface GetServiceStatisticsOptionalParams extends coreClient.OperationOptions {} diff --git a/sdk/search/search-documents/src/generated/service/models/mappers.ts b/sdk/search/search-documents/src/generated/service/models/mappers.ts index 921dceeb787a..7c580008a25c 100644 --- a/sdk/search/search-documents/src/generated/service/models/mappers.ts +++ b/sdk/search/search-documents/src/generated/service/models/mappers.ts @@ -47,13 +47,6 @@ export const SearchIndexerDataSource: coreClient.CompositeMapper = { className: "SearchIndexerDataContainer", }, }, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, dataChangeDetectionPolicy: { serializedName: "dataChangeDetectionPolicy", type: { @@ -122,27 +115,6 @@ export const SearchIndexerDataContainer: coreClient.CompositeMapper = { }, }; -export const SearchIndexerDataIdentity: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - uberParent: "SearchIndexerDataIdentity", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - export const DataChangeDetectionPolicy: coreClient.CompositeMapper = { type: { name: "Composite", @@ -218,13 +190,6 @@ export const SearchResourceEncryptionKey: coreClient.CompositeMapper = { className: "AzureActiveDirectoryApplicationCredentials", }, }, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, }, }, }; @@ -371,37 +336,6 @@ export const ListDataSourcesResult: coreClient.CompositeMapper = { }, }; -export const DocumentKeysOrIds: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "DocumentKeysOrIds", - modelProperties: { - documentKeys: { - serializedName: "documentKeys", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - datasourceDocumentIds: { - serializedName: "datasourceDocumentIds", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - export const SearchIndexer: coreClient.CompositeMapper = { type: { name: "Composite", @@ -499,13 +433,6 @@ export const SearchIndexer: coreClient.CompositeMapper = { className: "SearchResourceEncryptionKey", }, }, - cache: { - serializedName: "cache", - type: { - name: "Composite", - className: "SearchIndexerCache", - }, - }, }, }, }; @@ -638,22 +565,6 @@ export const IndexingParametersConfiguration: coreClient.CompositeMapper = { name: "Boolean", }, }, - markdownParsingSubmode: { - defaultValue: "oneToMany", - serializedName: "markdownParsingSubmode", - nullable: true, - type: { - name: "String", - }, - }, - markdownHeaderDepth: { - defaultValue: "h6", - serializedName: "markdownHeaderDepth", - nullable: true, - type: { - name: "String", - }, - }, documentRoot: { serializedName: "documentRoot", type: { @@ -759,35 +670,6 @@ export const 
FieldMappingFunction: coreClient.CompositeMapper = { }, }; -export const SearchIndexerCache: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchIndexerCache", - modelProperties: { - storageConnectionString: { - serializedName: "storageConnectionString", - type: { - name: "String", - }, - }, - enableReprocessing: { - serializedName: "enableReprocessing", - nullable: true, - type: { - name: "Boolean", - }, - }, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - }, - }, -}; - export const ListIndexersResult: coreClient.CompositeMapper = { type: { name: "Composite", @@ -871,20 +753,6 @@ export const IndexerExecutionResult: coreClient.CompositeMapper = { allowedValues: ["transientFailure", "success", "inProgress", "reset"], }, }, - statusDetail: { - serializedName: "statusDetail", - readOnly: true, - type: { - name: "String", - }, - }, - currentState: { - serializedName: "currentState", - type: { - name: "Composite", - className: "IndexerState", - }, - }, errorMessage: { serializedName: "errorMessage", readOnly: true, @@ -969,74 +837,6 @@ export const IndexerExecutionResult: coreClient.CompositeMapper = { }, }; -export const IndexerState: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "IndexerState", - modelProperties: { - mode: { - serializedName: "mode", - readOnly: true, - type: { - name: "String", - }, - }, - allDocumentsInitialChangeTrackingState: { - serializedName: "allDocsInitialChangeTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - allDocumentsFinalChangeTrackingState: { - serializedName: "allDocsFinalChangeTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - resetDocumentsInitialChangeTrackingState: { - serializedName: "resetDocsInitialChangeTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - resetDocumentsFinalChangeTrackingState: { - serializedName: "resetDocsFinalChangeTrackingState", - readOnly: true, - type: { - name: "String", - }, - }, - resetDocumentKeys: { - serializedName: "resetDocumentKeys", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - resetDatasourceDocumentIds: { - serializedName: "resetDatasourceDocumentIds", - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - export const SearchIndexerError: coreClient.CompositeMapper = { type: { name: "Composite", @@ -1413,20 +1213,6 @@ export const SearchIndexerKnowledgeStore: coreClient.CompositeMapper = { }, }, }, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - parameters: { - serializedName: "parameters", - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreParameters", - }, - }, }, }, }; @@ -1524,24 +1310,6 @@ export const SearchIndexerKnowledgeStoreProjectionSelector: coreClient.Composite }, }; -export const SearchIndexerKnowledgeStoreParameters: coreClient.CompositeMapper = - { - type: { - name: "Composite", - className: "SearchIndexerKnowledgeStoreParameters", - additionalProperties: { type: { name: "Object" } }, - modelProperties: { - synthesizeGeneratedKeyName: { - defaultValue: false, - serializedName: "synthesizeGeneratedKeyName", - type: { - name: "Boolean", - }, - }, - }, - }, - }; - export const SearchIndexerIndexProjection: coreClient.CompositeMapper = { type: { name: "Composite", @@ 
-1655,26 +1423,6 @@ export const ListSkillsetsResult: coreClient.CompositeMapper = { }, }; -export const SkillNames: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SkillNames", - modelProperties: { - skillNames: { - serializedName: "skillNames", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - export const SynonymMap: coreClient.CompositeMapper = { type: { name: "Composite", @@ -1852,18 +1600,6 @@ export const SearchIndex: coreClient.CompositeMapper = { }, }, }, - normalizers: { - serializedName: "normalizers", - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "LexicalNormalizer", - }, - }, - }, - }, encryptionKey: { serializedName: "encryptionKey", type: { @@ -1984,13 +1720,6 @@ export const SearchField: coreClient.CompositeMapper = { name: "String", }, }, - normalizer: { - serializedName: "normalizer", - nullable: true, - type: { - name: "String", - }, - }, vectorSearchDimensions: { constraints: { InclusiveMaximum: 2048, @@ -2326,34 +2055,6 @@ export const CharFilter: coreClient.CompositeMapper = { }, }; -export const LexicalNormalizer: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "LexicalNormalizer", - uberParent: "LexicalNormalizer", - polymorphicDiscriminator: { - serializedName: "@odata\\.type", - clientName: "odatatype", - }, - modelProperties: { - odatatype: { - serializedName: "@odata\\.type", - required: true, - type: { - name: "String", - }, - }, - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - export const Similarity: coreClient.CompositeMapper = { type: { name: "Composite", @@ -2667,64 +2368,19 @@ export const VectorSearchCompression: coreClient.CompositeMapper = { name: "Number", }, }, - rescoringOptions: { - serializedName: "rescoringOptions", - type: { - name: "Composite", - className: "RescoringOptions", - }, - }, - truncationDimension: { - serializedName: "truncationDimension", - nullable: true, - type: { - name: "Number", - }, - }, }, }, }; -export const RescoringOptions: coreClient.CompositeMapper = { +export const ListIndexesResult: coreClient.CompositeMapper = { type: { name: "Composite", - className: "RescoringOptions", + className: "ListIndexesResult", modelProperties: { - enableRescoring: { - defaultValue: true, - serializedName: "enableRescoring", - nullable: true, - type: { - name: "Boolean", - }, - }, - defaultOversampling: { - serializedName: "defaultOversampling", - nullable: true, - type: { - name: "Number", - }, - }, - rescoreStorageMethod: { - serializedName: "rescoreStorageMethod", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ListIndexesResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListIndexesResult", - modelProperties: { - indexes: { - serializedName: "value", - required: true, - readOnly: true, + indexes: { + serializedName: "value", + required: true, + readOnly: true, type: { name: "Sequence", element: { @@ -2796,12 +2452,6 @@ export const AnalyzeRequest: coreClient.CompositeMapper = { name: "String", }, }, - normalizer: { - serializedName: "normalizer", - type: { - name: "String", - }, - }, tokenFilters: { serializedName: "tokenFilters", type: { @@ -2891,63 +2541,6 @@ export const AnalyzedTokenInfo: coreClient.CompositeMapper = { }, }; -export const SearchAlias: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "SearchAlias", - 
modelProperties: { - name: { - serializedName: "name", - required: true, - type: { - name: "String", - }, - }, - indexes: { - serializedName: "indexes", - required: true, - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - etag: { - serializedName: "@odata\\.etag", - type: { - name: "String", - }, - }, - }, - }, -}; - -export const ListAliasesResult: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "ListAliasesResult", - modelProperties: { - aliases: { - serializedName: "value", - required: true, - readOnly: true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "SearchAlias", - }, - }, - }, - }, - }, - }, -}; - export const ServiceStatistics: coreClient.CompositeMapper = { type: { name: "Composite", @@ -2976,13 +2569,6 @@ export const ServiceCounters: coreClient.CompositeMapper = { name: "Composite", className: "ServiceCounters", modelProperties: { - aliasCounter: { - serializedName: "aliasesCount", - type: { - name: "Composite", - className: "ResourceCounter", - }, - }, documentCounter: { serializedName: "documentCount", type: { @@ -3234,6 +2820,27 @@ export const AzureOpenAIParameters: coreClient.CompositeMapper = { }, }; +export const SearchIndexerDataIdentity: coreClient.CompositeMapper = { + type: { + name: "Composite", + className: "SearchIndexerDataIdentity", + uberParent: "SearchIndexerDataIdentity", + polymorphicDiscriminator: { + serializedName: "@odata\\.type", + clientName: "odatatype", + }, + modelProperties: { + odatatype: { + serializedName: "@odata\\.type", + required: true, + type: { + name: "String", + }, + }, + }, + }, +}; + export const WebApiParameters: coreClient.CompositeMapper = { type: { name: "Composite", @@ -3282,94 +2889,6 @@ export const WebApiParameters: coreClient.CompositeMapper = { }, }; -export const AIServicesVisionParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AIServicesVisionParameters", - modelProperties: { - modelVersion: { - serializedName: "modelVersion", - required: true, - nullable: true, - type: { - name: "String", - }, - }, - resourceUri: { - serializedName: "resourceUri", - required: true, - type: { - name: "String", - }, - }, - apiKey: { - serializedName: "apiKey", - type: { - name: "String", - }, - }, - authIdentity: { - serializedName: "authIdentity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - }, - }, -}; - -export const AMLParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AMLParameters", - modelProperties: { - scoringUri: { - serializedName: "uri", - required: true, - nullable: true, - type: { - name: "String", - }, - }, - authenticationKey: { - serializedName: "key", - nullable: true, - type: { - name: "String", - }, - }, - resourceId: { - serializedName: "resourceId", - nullable: true, - type: { - name: "String", - }, - }, - timeout: { - serializedName: "timeout", - nullable: true, - type: { - name: "TimeSpan", - }, - }, - region: { - serializedName: "region", - nullable: true, - type: { - name: "String", - }, - }, - modelName: { - serializedName: "modelName", - type: { - name: "String", - }, - }, - }, - }, -}; - export const DistanceScoringParameters: coreClient.CompositeMapper = { type: { name: "Composite", @@ -3454,6 +2973,24 @@ export const TagScoringParameters: coreClient.CompositeMapper = { }, }; +export const SearchIndexerKnowledgeStoreParameters: coreClient.CompositeMapper = + { + type: { + name: 
"Composite", + className: "SearchIndexerKnowledgeStoreParameters", + additionalProperties: { type: { name: "Object" } }, + modelProperties: { + synthesizeGeneratedKeyName: { + defaultValue: false, + serializedName: "synthesizeGeneratedKeyName", + type: { + name: "Boolean", + }, + }, + }, + }, + }; + export const CustomEntity: coreClient.CompositeMapper = { type: { name: "Composite", @@ -3590,69 +3127,6 @@ export const CustomEntityAlias: coreClient.CompositeMapper = { }, }; -export const AzureOpenAITokenizerParameters: coreClient.CompositeMapper = { - type: { - name: "Composite", - className: "AzureOpenAITokenizerParameters", - modelProperties: { - encoderModelName: { - serializedName: "encoderModelName", - nullable: true, - type: { - name: "String", - }, - }, - allowedSpecialTokens: { - serializedName: "allowedSpecialTokens", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - -export const SearchIndexerDataNoneIdentity: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.DataNoneIdentity", - type: { - name: "Composite", - className: "SearchIndexerDataNoneIdentity", - uberParent: "SearchIndexerDataIdentity", - polymorphicDiscriminator: - SearchIndexerDataIdentity.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerDataIdentity.type.modelProperties, - }, - }, -}; - -export const SearchIndexerDataUserAssignedIdentity: coreClient.CompositeMapper = - { - serializedName: "#Microsoft.Azure.Search.DataUserAssignedIdentity", - type: { - name: "Composite", - className: "SearchIndexerDataUserAssignedIdentity", - uberParent: "SearchIndexerDataIdentity", - polymorphicDiscriminator: - SearchIndexerDataIdentity.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerDataIdentity.type.modelProperties, - resourceId: { - serializedName: "userAssignedIdentity", - required: true, - type: { - name: "String", - }, - }, - }, - }, - }; - export const HighWaterMarkChangeDetectionPolicy: coreClient.CompositeMapper = { serializedName: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy", type: { @@ -3716,22 +3190,6 @@ export const SoftDeleteColumnDeletionDetectionPolicy: coreClient.CompositeMapper }, }; -export const NativeBlobSoftDeleteDeletionDetectionPolicy: coreClient.CompositeMapper = - { - serializedName: - "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy", - type: { - name: "Composite", - className: "NativeBlobSoftDeleteDeletionDetectionPolicy", - uberParent: "DataDeletionDetectionPolicy", - polymorphicDiscriminator: - DataDeletionDetectionPolicy.type.polymorphicDiscriminator, - modelProperties: { - ...DataDeletionDetectionPolicy.type.modelProperties, - }, - }, - }; - export const ConditionalSkill: coreClient.CompositeMapper = { serializedName: "#Microsoft.Skills.Util.ConditionalSkill", type: { @@ -4216,20 +3674,6 @@ export const SplitSkill: coreClient.CompositeMapper = { name: "Number", }, }, - unit: { - serializedName: "unit", - nullable: true, - type: { - name: "String", - }, - }, - azureOpenAITokenizerParameters: { - serializedName: "azureOpenAITokenizerParameters", - type: { - name: "Composite", - className: "AzureOpenAITokenizerParameters", - }, - }, }, }, }; @@ -4251,281 +3695,137 @@ export const CustomEntityLookupSkill: coreClient.CompositeMapper = { }, }, entitiesDefinitionUri: { - serializedName: "entitiesDefinitionUri", - nullable: true, - type: { - name: "String", - }, - }, - inlineEntitiesDefinition: { - serializedName: "inlineEntitiesDefinition", - nullable: 
true, - type: { - name: "Sequence", - element: { - type: { - name: "Composite", - className: "CustomEntity", - }, - }, - }, - }, - globalDefaultCaseSensitive: { - serializedName: "globalDefaultCaseSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - globalDefaultAccentSensitive: { - serializedName: "globalDefaultAccentSensitive", - nullable: true, - type: { - name: "Boolean", - }, - }, - globalDefaultFuzzyEditDistance: { - serializedName: "globalDefaultFuzzyEditDistance", - nullable: true, - type: { - name: "Number", - }, - }, - }, - }, -}; - -export const TextTranslationSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.TranslationSkill", - type: { - name: "Composite", - className: "TextTranslationSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - defaultToLanguageCode: { - serializedName: "defaultToLanguageCode", - required: true, - type: { - name: "String", - }, - }, - defaultFromLanguageCode: { - serializedName: "defaultFromLanguageCode", - type: { - name: "String", - }, - }, - suggestedFrom: { - serializedName: "suggestedFrom", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const DocumentExtractionSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Util.DocumentExtractionSkill", - type: { - name: "Composite", - className: "DocumentExtractionSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - parsingMode: { - serializedName: "parsingMode", - nullable: true, - type: { - name: "String", - }, - }, - dataToExtract: { - serializedName: "dataToExtract", - nullable: true, - type: { - name: "String", - }, - }, - configuration: { - serializedName: "configuration", - nullable: true, - type: { - name: "Dictionary", - value: { type: { name: "any" } }, - }, - }, - }, - }, -}; - -export const DocumentIntelligenceLayoutSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill", - type: { - name: "Composite", - className: "DocumentIntelligenceLayoutSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - outputMode: { - defaultValue: "oneToMany", - serializedName: "outputMode", - nullable: true, - type: { - name: "String", - }, - }, - markdownHeaderDepth: { - defaultValue: "h6", - serializedName: "markdownHeaderDepth", - nullable: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const WebApiSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Custom.WebApiSkill", - type: { - name: "Composite", - className: "WebApiSkill", - uberParent: "SearchIndexerSkill", - polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, - modelProperties: { - ...SearchIndexerSkill.type.modelProperties, - uri: { - serializedName: "uri", - required: true, - type: { - name: "String", - }, - }, - httpHeaders: { - serializedName: "httpHeaders", - type: { - name: "Dictionary", - value: { type: { name: "String" } }, - }, - }, - httpMethod: { - serializedName: "httpMethod", - type: { - name: "String", - }, - }, - timeout: { - serializedName: "timeout", + serializedName: "entitiesDefinitionUri", + nullable: true, 
type: { - name: "TimeSpan", + name: "String", }, }, - batchSize: { - serializedName: "batchSize", + inlineEntitiesDefinition: { + serializedName: "inlineEntitiesDefinition", nullable: true, type: { - name: "Number", + name: "Sequence", + element: { + type: { + name: "Composite", + className: "CustomEntity", + }, + }, }, }, - degreeOfParallelism: { - serializedName: "degreeOfParallelism", + globalDefaultCaseSensitive: { + serializedName: "globalDefaultCaseSensitive", nullable: true, type: { - name: "Number", + name: "Boolean", }, }, - authResourceId: { - serializedName: "authResourceId", + globalDefaultAccentSensitive: { + serializedName: "globalDefaultAccentSensitive", nullable: true, type: { - name: "String", + name: "Boolean", }, }, - authIdentity: { - serializedName: "authIdentity", + globalDefaultFuzzyEditDistance: { + serializedName: "globalDefaultFuzzyEditDistance", + nullable: true, type: { - name: "Composite", - className: "SearchIndexerDataIdentity", + name: "Number", }, }, }, }, }; -export const AzureMachineLearningSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Custom.AmlSkill", +export const TextTranslationSkill: coreClient.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.TranslationSkill", type: { name: "Composite", - className: "AzureMachineLearningSkill", + className: "TextTranslationSkill", uberParent: "SearchIndexerSkill", polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { ...SearchIndexerSkill.type.modelProperties, - scoringUri: { - serializedName: "uri", - nullable: true, + defaultToLanguageCode: { + serializedName: "defaultToLanguageCode", + required: true, type: { name: "String", }, }, - authenticationKey: { - serializedName: "key", - nullable: true, + defaultFromLanguageCode: { + serializedName: "defaultFromLanguageCode", type: { name: "String", }, }, - resourceId: { - serializedName: "resourceId", + suggestedFrom: { + serializedName: "suggestedFrom", nullable: true, type: { name: "String", }, }, - timeout: { - serializedName: "timeout", + }, + }, +}; + +export const DocumentExtractionSkill: coreClient.CompositeMapper = { + serializedName: "#Microsoft.Skills.Util.DocumentExtractionSkill", + type: { + name: "Composite", + className: "DocumentExtractionSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerSkill.type.modelProperties, + parsingMode: { + serializedName: "parsingMode", nullable: true, type: { - name: "TimeSpan", + name: "String", }, }, - region: { - serializedName: "region", + dataToExtract: { + serializedName: "dataToExtract", nullable: true, type: { name: "String", }, }, - degreeOfParallelism: { - serializedName: "degreeOfParallelism", + configuration: { + serializedName: "configuration", nullable: true, type: { - name: "Number", + name: "Dictionary", + value: { type: { name: "any" } }, }, }, }, }, }; -export const AzureOpenAIEmbeddingSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill", +export const WebApiSkill: coreClient.CompositeMapper = { + serializedName: "#Microsoft.Skills.Custom.WebApiSkill", type: { name: "Composite", - className: "AzureOpenAIEmbeddingSkill", + className: "WebApiSkill", uberParent: "SearchIndexerSkill", polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { ...SearchIndexerSkill.type.modelProperties, - ...AzureOpenAIParameters.type.modelProperties, 
- dimensions: { - serializedName: "dimensions", + ...WebApiParameters.type.modelProperties, + batchSize: { + serializedName: "batchSize", + nullable: true, + type: { + name: "Number", + }, + }, + degreeOfParallelism: { + serializedName: "degreeOfParallelism", nullable: true, type: { name: "Number", @@ -4535,21 +3835,21 @@ export const AzureOpenAIEmbeddingSkill: coreClient.CompositeMapper = { }, }; -export const VisionVectorizeSkill: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Skills.Vision.VectorizeSkill", +export const AzureOpenAIEmbeddingSkill: coreClient.CompositeMapper = { + serializedName: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill", type: { name: "Composite", - className: "VisionVectorizeSkill", + className: "AzureOpenAIEmbeddingSkill", uberParent: "SearchIndexerSkill", polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { ...SearchIndexerSkill.type.modelProperties, - modelVersion: { - serializedName: "modelVersion", - required: true, + ...AzureOpenAIParameters.type.modelProperties, + dimensions: { + serializedName: "dimensions", nullable: true, type: { - name: "String", + name: "Number", }, }, }, @@ -4591,62 +3891,6 @@ export const CognitiveServicesAccountKey: coreClient.CompositeMapper = { }, }; -export const AIServicesAccountKey: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.AIServicesByKey", - type: { - name: "Composite", - className: "AIServicesAccountKey", - uberParent: "CognitiveServicesAccount", - polymorphicDiscriminator: - CognitiveServicesAccount.type.polymorphicDiscriminator, - modelProperties: { - ...CognitiveServicesAccount.type.modelProperties, - key: { - serializedName: "key", - required: true, - type: { - name: "String", - }, - }, - subdomainUrl: { - serializedName: "subdomainUrl", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - -export const AIServicesAccountIdentity: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.AIServicesByIdentity", - type: { - name: "Composite", - className: "AIServicesAccountIdentity", - uberParent: "CognitiveServicesAccount", - polymorphicDiscriminator: - CognitiveServicesAccount.type.polymorphicDiscriminator, - modelProperties: { - ...CognitiveServicesAccount.type.modelProperties, - identity: { - serializedName: "identity", - type: { - name: "Composite", - className: "SearchIndexerDataIdentity", - }, - }, - subdomainUrl: { - serializedName: "subdomainUrl", - required: true, - type: { - name: "String", - }, - }, - }, - }, -}; - export const SearchIndexerKnowledgeStoreTableProjectionSelector: coreClient.CompositeMapper = { type: { @@ -6425,41 +5669,6 @@ export const PatternReplaceCharFilter: coreClient.CompositeMapper = { }, }; -export const CustomNormalizer: coreClient.CompositeMapper = { - serializedName: "#Microsoft.Azure.Search.CustomNormalizer", - type: { - name: "Composite", - className: "CustomNormalizer", - uberParent: "LexicalNormalizer", - polymorphicDiscriminator: LexicalNormalizer.type.polymorphicDiscriminator, - modelProperties: { - ...LexicalNormalizer.type.modelProperties, - tokenFilters: { - serializedName: "tokenFilters", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - charFilters: { - serializedName: "charFilters", - type: { - name: "Sequence", - element: { - type: { - name: "String", - }, - }, - }, - }, - }, - }, -}; - export const ClassicSimilarity: coreClient.CompositeMapper = { serializedName: 
"#Microsoft.Azure.Search.ClassicSimilarity", type: { @@ -6584,48 +5793,6 @@ export const WebApiVectorizer: coreClient.CompositeMapper = { }, }; -export const AIServicesVisionVectorizer: coreClient.CompositeMapper = { - serializedName: "aiServicesVision", - type: { - name: "Composite", - className: "AIServicesVisionVectorizer", - uberParent: "VectorSearchVectorizer", - polymorphicDiscriminator: - VectorSearchVectorizer.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchVectorizer.type.modelProperties, - aIServicesVisionParameters: { - serializedName: "aiServicesVisionParameters", - type: { - name: "Composite", - className: "AIServicesVisionParameters", - }, - }, - }, - }, -}; - -export const AMLVectorizer: coreClient.CompositeMapper = { - serializedName: "aml", - type: { - name: "Composite", - className: "AMLVectorizer", - uberParent: "VectorSearchVectorizer", - polymorphicDiscriminator: - VectorSearchVectorizer.type.polymorphicDiscriminator, - modelProperties: { - ...VectorSearchVectorizer.type.modelProperties, - aMLParameters: { - serializedName: "amlParameters", - type: { - name: "Composite", - className: "AMLParameters", - }, - }, - }, - }, -}; - export const ScalarQuantizationCompression: coreClient.CompositeMapper = { serializedName: "scalarQuantization", type: { @@ -6661,6 +5828,42 @@ export const BinaryQuantizationCompression: coreClient.CompositeMapper = { }, }; +export const SearchIndexerDataNoneIdentity: coreClient.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.DataNoneIdentity", + type: { + name: "Composite", + className: "SearchIndexerDataNoneIdentity", + uberParent: "SearchIndexerDataIdentity", + polymorphicDiscriminator: + SearchIndexerDataIdentity.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerDataIdentity.type.modelProperties, + }, + }, +}; + +export const SearchIndexerDataUserAssignedIdentity: coreClient.CompositeMapper = + { + serializedName: "#Microsoft.Azure.Search.DataUserAssignedIdentity", + type: { + name: "Composite", + className: "SearchIndexerDataUserAssignedIdentity", + uberParent: "SearchIndexerDataIdentity", + polymorphicDiscriminator: + SearchIndexerDataIdentity.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerDataIdentity.type.modelProperties, + resourceId: { + serializedName: "userAssignedIdentity", + required: true, + type: { + name: "String", + }, + }, + }, + }, + }; + export const SearchIndexerKnowledgeStoreObjectProjectionSelector: coreClient.CompositeMapper = { type: { @@ -6686,7 +5889,6 @@ export const SearchIndexerKnowledgeStoreFileProjectionSelector: coreClient.Compo }; export let discriminators = { - SearchIndexerDataIdentity: SearchIndexerDataIdentity, DataChangeDetectionPolicy: DataChangeDetectionPolicy, DataDeletionDetectionPolicy: DataDeletionDetectionPolicy, SearchIndexerSkill: SearchIndexerSkill, @@ -6696,23 +5898,17 @@ export let discriminators = { LexicalTokenizer: LexicalTokenizer, TokenFilter: TokenFilter, CharFilter: CharFilter, - LexicalNormalizer: LexicalNormalizer, Similarity: Similarity, VectorSearchAlgorithmConfiguration: VectorSearchAlgorithmConfiguration, VectorSearchVectorizer: VectorSearchVectorizer, VectorSearchCompression: VectorSearchCompression, - "SearchIndexerDataIdentity.#Microsoft.Azure.Search.DataNoneIdentity": - SearchIndexerDataNoneIdentity, - "SearchIndexerDataIdentity.#Microsoft.Azure.Search.DataUserAssignedIdentity": - SearchIndexerDataUserAssignedIdentity, + SearchIndexerDataIdentity: SearchIndexerDataIdentity, 
"DataChangeDetectionPolicy.#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy": HighWaterMarkChangeDetectionPolicy, "DataChangeDetectionPolicy.#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy": SqlIntegratedChangeTrackingPolicy, "DataDeletionDetectionPolicy.#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy": SoftDeleteColumnDeletionDetectionPolicy, - "DataDeletionDetectionPolicy.#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy": - NativeBlobSoftDeleteDeletionDetectionPolicy, "SearchIndexerSkill.#Microsoft.Skills.Util.ConditionalSkill": ConditionalSkill, "SearchIndexerSkill.#Microsoft.Skills.Text.KeyPhraseExtractionSkill": @@ -6742,23 +5938,13 @@ export let discriminators = { TextTranslationSkill, "SearchIndexerSkill.#Microsoft.Skills.Util.DocumentExtractionSkill": DocumentExtractionSkill, - "SearchIndexerSkill.#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill": - DocumentIntelligenceLayoutSkill, "SearchIndexerSkill.#Microsoft.Skills.Custom.WebApiSkill": WebApiSkill, - "SearchIndexerSkill.#Microsoft.Skills.Custom.AmlSkill": - AzureMachineLearningSkill, "SearchIndexerSkill.#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill": AzureOpenAIEmbeddingSkill, - "SearchIndexerSkill.#Microsoft.Skills.Vision.VectorizeSkill": - VisionVectorizeSkill, "CognitiveServicesAccount.#Microsoft.Azure.Search.DefaultCognitiveServices": DefaultCognitiveServicesAccount, "CognitiveServicesAccount.#Microsoft.Azure.Search.CognitiveServicesByKey": CognitiveServicesAccountKey, - "CognitiveServicesAccount.#Microsoft.Azure.Search.AIServicesByKey": - AIServicesAccountKey, - "CognitiveServicesAccount.#Microsoft.Azure.Search.AIServicesByIdentity": - AIServicesAccountIdentity, "ScoringFunction.distance": DistanceScoringFunction, "ScoringFunction.freshness": FreshnessScoringFunction, "ScoringFunction.magnitude": MagnitudeScoringFunction, @@ -6831,8 +6017,6 @@ export let discriminators = { "CharFilter.#Microsoft.Azure.Search.MappingCharFilter": MappingCharFilter, "CharFilter.#Microsoft.Azure.Search.PatternReplaceCharFilter": PatternReplaceCharFilter, - "LexicalNormalizer.#Microsoft.Azure.Search.CustomNormalizer": - CustomNormalizer, "Similarity.#Microsoft.Azure.Search.ClassicSimilarity": ClassicSimilarity, "Similarity.#Microsoft.Azure.Search.BM25Similarity": BM25Similarity, "VectorSearchAlgorithmConfiguration.hnsw": HnswAlgorithmConfiguration, @@ -6840,8 +6024,10 @@ export let discriminators = { ExhaustiveKnnAlgorithmConfiguration, "VectorSearchVectorizer.azureOpenAI": AzureOpenAIVectorizer, "VectorSearchVectorizer.customWebApi": WebApiVectorizer, - "VectorSearchVectorizer.aiServicesVision": AIServicesVisionVectorizer, - "VectorSearchVectorizer.aml": AMLVectorizer, "VectorSearchCompression.scalarQuantization": ScalarQuantizationCompression, "VectorSearchCompression.binaryQuantization": BinaryQuantizationCompression, + "SearchIndexerDataIdentity.#Microsoft.Azure.Search.DataNoneIdentity": + SearchIndexerDataNoneIdentity, + "SearchIndexerDataIdentity.#Microsoft.Azure.Search.DataUserAssignedIdentity": + SearchIndexerDataUserAssignedIdentity, }; diff --git a/sdk/search/search-documents/src/generated/service/models/parameters.ts b/sdk/search/search-documents/src/generated/service/models/parameters.ts index cc24da1b6b80..12db0fcabf41 100644 --- a/sdk/search/search-documents/src/generated/service/models/parameters.ts +++ b/sdk/search/search-documents/src/generated/service/models/parameters.ts @@ -13,15 +13,12 @@ import { } from "@azure/core-client"; import { SearchIndexerDataSource as 
SearchIndexerDataSourceMapper, - DocumentKeysOrIds as DocumentKeysOrIdsMapper, SearchIndexer as SearchIndexerMapper, SearchIndexerSkillset as SearchIndexerSkillsetMapper, - SkillNames as SkillNamesMapper, SynonymMap as SynonymMapMapper, SearchIndex as SearchIndexMapper, AnalyzeRequest as AnalyzeRequestMapper, - SearchAlias as SearchAliasMapper, -} from "../models/mappers.js"; +} from "../models/mappers"; export const contentType: OperationParameter = { parameterPath: ["options", "contentType"], @@ -118,16 +115,6 @@ export const apiVersion: OperationQueryParameter = { }, }; -export const skipIndexerResetRequirementForCache: OperationQueryParameter = { - parameterPath: ["options", "skipIndexerResetRequirementForCache"], - mapper: { - serializedName: "ignoreResetRequirements", - type: { - name: "Boolean", - }, - }, -}; - export const select: OperationQueryParameter = { parameterPath: ["options", "select"], mapper: { @@ -149,38 +136,11 @@ export const indexerName: OperationURLParameter = { }, }; -export const keysOrIds: OperationParameter = { - parameterPath: ["options", "keysOrIds"], - mapper: DocumentKeysOrIdsMapper, -}; - -export const overwrite: OperationQueryParameter = { - parameterPath: ["options", "overwrite"], - mapper: { - defaultValue: false, - serializedName: "overwrite", - type: { - name: "Boolean", - }, - }, -}; - export const indexer: OperationParameter = { parameterPath: "indexer", mapper: SearchIndexerMapper, }; -export const disableCacheReprocessingChangeDetection: OperationQueryParameter = - { - parameterPath: ["options", "disableCacheReprocessingChangeDetection"], - mapper: { - serializedName: "disableCacheReprocessingChangeDetection", - type: { - name: "Boolean", - }, - }, - }; - export const skillset: OperationParameter = { parameterPath: "skillset", mapper: SearchIndexerSkillsetMapper, @@ -197,11 +157,6 @@ export const skillsetName: OperationURLParameter = { }, }; -export const skillNames: OperationParameter = { - parameterPath: "skillNames", - mapper: SkillNamesMapper, -}; - export const synonymMap: OperationParameter = { parameterPath: "synonymMap", mapper: SynonymMapMapper, @@ -248,19 +203,3 @@ export const request: OperationParameter = { parameterPath: "request", mapper: AnalyzeRequestMapper, }; - -export const alias: OperationParameter = { - parameterPath: "alias", - mapper: SearchAliasMapper, -}; - -export const aliasName: OperationURLParameter = { - parameterPath: "aliasName", - mapper: { - serializedName: "aliasName", - required: true, - type: { - name: "String", - }, - }, -}; diff --git a/sdk/search/search-documents/src/generated/service/operations/aliases.ts b/sdk/search/search-documents/src/generated/service/operations/aliases.ts deleted file mode 100644 index 32ca20eb76a4..000000000000 --- a/sdk/search/search-documents/src/generated/service/operations/aliases.ts +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
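The discriminators table exported from mappers.ts above is what lets the shared serializer resolve a payload's "@odata.type" into the right composite mapper. On the public models the same discriminant appears as the odatatype literal, which TypeScript can narrow on. A sketch against the public union types, assuming the hand-written models mirror these mappers (field paths are hypothetical):

import type { SearchIndexerSkill } from "@azure/search-documents";

const translate: SearchIndexerSkill = {
  odatatype: "#Microsoft.Skills.Text.TranslationSkill",
  defaultToLanguageCode: "fr",
  inputs: [{ name: "text", source: "/document/content" }], // hypothetical path
  outputs: [{ name: "translatedText", targetName: "french_text" }],
};

function describeSkill(skill: SearchIndexerSkill): string {
  switch (skill.odatatype) {
    case "#Microsoft.Skills.Text.TranslationSkill":
      // Narrowed to TextTranslationSkill in this branch.
      return `translates to ${skill.defaultToLanguageCode}`;
    default:
      return skill.odatatype;
  }
}

console.log(describeSkill(translate));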
- */ - -import { Aliases } from "../operationsInterfaces/index.js"; -import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; -import { - SearchAlias, - AliasesCreateOptionalParams, - AliasesCreateResponse, - AliasesListOptionalParams, - AliasesListResponse, - AliasesCreateOrUpdateOptionalParams, - AliasesCreateOrUpdateResponse, - AliasesDeleteOptionalParams, - AliasesGetOptionalParams, - AliasesGetResponse, -} from "../models/index.js"; - -/** Class containing Aliases operations. */ -export class AliasesImpl implements Aliases { - private readonly client: SearchServiceClient; - - /** - * Initialize a new instance of the class Aliases class. - * @param client Reference to the service client - */ - constructor(client: SearchServiceClient) { - this.client = client; - } - - /** - * Creates a new search alias. - * @param alias The definition of the alias to create. - * @param options The options parameters. - */ - create( - alias: SearchAlias, - options?: AliasesCreateOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { alias, options }, - createOperationSpec, - ); - } - - /** - * Lists all aliases available for a search service. - * @param options The options parameters. - */ - list(options?: AliasesListOptionalParams): Promise { - return this.client.sendOperationRequest({ options }, listOperationSpec); - } - - /** - * Creates a new search alias or updates an alias if it already exists. - * @param aliasName The definition of the alias to create or update. - * @param alias The definition of the alias to create or update. - * @param options The options parameters. - */ - createOrUpdate( - aliasName: string, - alias: SearchAlias, - options?: AliasesCreateOrUpdateOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { aliasName, alias, options }, - createOrUpdateOperationSpec, - ); - } - - /** - * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no - * recovery option. The mapped index is untouched by this operation. - * @param aliasName The name of the alias to delete. - * @param options The options parameters. - */ - delete( - aliasName: string, - options?: AliasesDeleteOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { aliasName, options }, - deleteOperationSpec, - ); - } - - /** - * Retrieves an alias definition. - * @param aliasName The name of the alias to retrieve. - * @param options The options parameters. 
- */ - get( - aliasName: string, - options?: AliasesGetOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { aliasName, options }, - getOperationSpec, - ); - } -} -// Operation Specifications -const serializer = coreClient.createSerializer(Mappers, /* isXml */ false); - -const createOperationSpec: coreClient.OperationSpec = { - path: "/aliases", - httpMethod: "POST", - responses: { - 201: { - bodyMapper: Mappers.SearchAlias, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.alias, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint], - headerParameters: [Parameters.contentType, Parameters.accept], - mediaType: "json", - serializer, -}; -const listOperationSpec: coreClient.OperationSpec = { - path: "/aliases", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: Mappers.ListAliasesResult, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint], - headerParameters: [Parameters.accept], - serializer, -}; -const createOrUpdateOperationSpec: coreClient.OperationSpec = { - path: "/aliases('{aliasName}')", - httpMethod: "PUT", - responses: { - 200: { - bodyMapper: Mappers.SearchAlias, - }, - 201: { - bodyMapper: Mappers.SearchAlias, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.alias, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.aliasName], - headerParameters: [ - Parameters.contentType, - Parameters.accept, - Parameters.ifMatch, - Parameters.ifNoneMatch, - Parameters.prefer, - ], - mediaType: "json", - serializer, -}; -const deleteOperationSpec: coreClient.OperationSpec = { - path: "/aliases('{aliasName}')", - httpMethod: "DELETE", - responses: { - 204: {}, - 404: {}, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.aliasName], - headerParameters: [ - Parameters.accept, - Parameters.ifMatch, - Parameters.ifNoneMatch, - ], - serializer, -}; -const getOperationSpec: coreClient.OperationSpec = { - path: "/aliases('{aliasName}')", - httpMethod: "GET", - responses: { - 200: { - bodyMapper: Mappers.SearchAlias, - }, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.aliasName], - headerParameters: [Parameters.accept], - serializer, -}; diff --git a/sdk/search/search-documents/src/generated/service/operations/dataSources.ts b/sdk/search/search-documents/src/generated/service/operations/dataSources.ts index 5bf610119b24..71292ac4e3d3 100644 --- a/sdk/search/search-documents/src/generated/service/operations/dataSources.ts +++ b/sdk/search/search-documents/src/generated/service/operations/dataSources.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
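Every operation in these generated files follows the same shape: a thin public method forwards its arguments plus a module-scoped OperationSpec to ServiceClient.sendOperationRequest, and one shared serializer built from the mapper table handles (de)serialization. An illustrative spec in that pattern, reusing this package's own mappers and parameters and loosely mirroring the service-statistics operation (not part of this patch):

import * as coreClient from "@azure/core-client";
import * as Mappers from "../models/mappers";
import * as Parameters from "../models/parameters";

const serializer = coreClient.createSerializer(Mappers, /* isXml */ false);

const getServiceStatisticsOperationSpec: coreClient.OperationSpec = {
  path: "/servicestats",
  httpMethod: "GET",
  responses: {
    200: { bodyMapper: Mappers.ServiceStatistics },
    default: { bodyMapper: Mappers.ErrorResponse },
  },
  queryParameters: [Parameters.apiVersion],
  urlParameters: [Parameters.endpoint],
  headerParameters: [Parameters.accept],
  serializer,
};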
*/ -import { DataSources } from "../operationsInterfaces/index.js"; +import { DataSources } from "../operationsInterfaces"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; +import * as Mappers from "../models/mappers"; +import * as Parameters from "../models/parameters"; +import { SearchServiceClient } from "../searchServiceClient"; import { SearchIndexerDataSource, DataSourcesCreateOrUpdateOptionalParams, @@ -22,7 +22,7 @@ import { DataSourcesListResponse, DataSourcesCreateOptionalParams, DataSourcesCreateResponse, -} from "../models/index.js"; +} from "../models"; /** Class containing DataSources operations. */ export class DataSourcesImpl implements DataSources { @@ -126,10 +126,7 @@ const createOrUpdateOperationSpec: coreClient.OperationSpec = { }, }, requestBody: Parameters.dataSource, - queryParameters: [ - Parameters.apiVersion, - Parameters.skipIndexerResetRequirementForCache, - ], + queryParameters: [Parameters.apiVersion], urlParameters: [Parameters.endpoint, Parameters.dataSourceName], headerParameters: [ Parameters.contentType, diff --git a/sdk/search/search-documents/src/generated/service/operations/index.ts b/sdk/search/search-documents/src/generated/service/operations/index.ts index 4022a60095f0..896ae33eded4 100644 --- a/sdk/search/search-documents/src/generated/service/operations/index.ts +++ b/sdk/search/search-documents/src/generated/service/operations/index.ts @@ -6,9 +6,8 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -export * from "./dataSources.js"; -export * from "./indexers.js"; -export * from "./skillsets.js"; -export * from "./synonymMaps.js"; -export * from "./indexes.js"; -export * from "./aliases.js"; +export * from "./dataSources"; +export * from "./indexers"; +export * from "./skillsets"; +export * from "./synonymMaps"; +export * from "./indexes"; diff --git a/sdk/search/search-documents/src/generated/service/operations/indexers.ts b/sdk/search/search-documents/src/generated/service/operations/indexers.ts index 98d42fc0058d..dbdb52a59026 100644 --- a/sdk/search/search-documents/src/generated/service/operations/indexers.ts +++ b/sdk/search/search-documents/src/generated/service/operations/indexers.ts @@ -6,14 +6,13 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -import { Indexers } from "../operationsInterfaces/index.js"; +import { Indexers } from "../operationsInterfaces"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; +import * as Mappers from "../models/mappers"; +import * as Parameters from "../models/parameters"; +import { SearchServiceClient } from "../searchServiceClient"; import { IndexersResetOptionalParams, - IndexersResetDocsOptionalParams, IndexersRunOptionalParams, SearchIndexer, IndexersCreateOrUpdateOptionalParams, @@ -27,7 +26,7 @@ import { IndexersCreateResponse, IndexersGetStatusOptionalParams, IndexersGetStatusResponse, -} from "../models/index.js"; +} from "../models"; /** Class containing Indexers operations. 
*/ export class IndexersImpl implements Indexers { @@ -56,21 +55,6 @@ export class IndexersImpl implements Indexers { ); } - /** - * Resets specific documents in the datasource to be selectively re-ingested by the indexer. - * @param indexerName The name of the indexer to reset documents for. - * @param options The options parameters. - */ - resetDocs( - indexerName: string, - options?: IndexersResetDocsOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { indexerName, options }, - resetDocsOperationSpec, - ); - } - /** * Runs an indexer on-demand. * @param indexerName The name of the indexer to run. @@ -185,22 +169,6 @@ const resetOperationSpec: coreClient.OperationSpec = { headerParameters: [Parameters.accept], serializer, }; -const resetDocsOperationSpec: coreClient.OperationSpec = { - path: "/indexers('{indexerName}')/search.resetdocs", - httpMethod: "POST", - responses: { - 204: {}, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.keysOrIds, - queryParameters: [Parameters.apiVersion, Parameters.overwrite], - urlParameters: [Parameters.endpoint, Parameters.indexerName], - headerParameters: [Parameters.contentType, Parameters.accept], - mediaType: "json", - serializer, -}; const runOperationSpec: coreClient.OperationSpec = { path: "/indexers('{indexerName}')/search.run", httpMethod: "POST", @@ -230,11 +198,7 @@ const createOrUpdateOperationSpec: coreClient.OperationSpec = { }, }, requestBody: Parameters.indexer, - queryParameters: [ - Parameters.apiVersion, - Parameters.skipIndexerResetRequirementForCache, - Parameters.disableCacheReprocessingChangeDetection, - ], + queryParameters: [Parameters.apiVersion], urlParameters: [Parameters.endpoint, Parameters.indexerName], headerParameters: [ Parameters.contentType, diff --git a/sdk/search/search-documents/src/generated/service/operations/indexes.ts b/sdk/search/search-documents/src/generated/service/operations/indexes.ts index 7487b922cf78..c456c969db12 100644 --- a/sdk/search/search-documents/src/generated/service/operations/indexes.ts +++ b/sdk/search/search-documents/src/generated/service/operations/indexes.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -import { Indexes } from "../operationsInterfaces/index.js"; +import { Indexes } from "../operationsInterfaces"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; +import * as Mappers from "../models/mappers"; +import * as Parameters from "../models/parameters"; +import { SearchServiceClient } from "../searchServiceClient"; import { SearchIndex, IndexesCreateOptionalParams, @@ -27,7 +27,7 @@ import { AnalyzeRequest, IndexesAnalyzeOptionalParams, IndexesAnalyzeResponse, -} from "../models/index.js"; +} from "../models"; /** Class containing Indexes operations. */ export class IndexesImpl implements Indexes { diff --git a/sdk/search/search-documents/src/generated/service/operations/skillsets.ts b/sdk/search/search-documents/src/generated/service/operations/skillsets.ts index 770caf4c4473..c9394176f298 100644 --- a/sdk/search/search-documents/src/generated/service/operations/skillsets.ts +++ b/sdk/search/search-documents/src/generated/service/operations/skillsets.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
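The reset and run operation specs above back the public SearchIndexerClient helpers; with the resetDocs surface removed by this patch, a full re-ingestion is a reset followed by a run. A minimal sketch (endpoint, key, and indexer name are hypothetical):

import { AzureKeyCredential, SearchIndexerClient } from "@azure/search-documents";

const endpoint = "https://<service-name>.search.windows.net"; // hypothetical
const client = new SearchIndexerClient(endpoint, new AzureKeyCredential("<admin-key>"));

async function reingestAll(): Promise<void> {
  await client.resetIndexer("hotels-indexer"); // clears change-tracking state
  await client.runIndexer("hotels-indexer"); // re-runs the indexer on demand
  const status = await client.getIndexerStatus("hotels-indexer");
  console.log(status.lastResult?.status);
}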
*/ -import { Skillsets } from "../operationsInterfaces/index.js"; +import { Skillsets } from "../operationsInterfaces"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; +import * as Mappers from "../models/mappers"; +import * as Parameters from "../models/parameters"; +import { SearchServiceClient } from "../searchServiceClient"; import { SearchIndexerSkillset, SkillsetsCreateOrUpdateOptionalParams, @@ -22,9 +22,7 @@ import { SkillsetsListResponse, SkillsetsCreateOptionalParams, SkillsetsCreateResponse, - SkillNames, - SkillsetsResetSkillsOptionalParams, -} from "../models/index.js"; +} from "../models"; /** Class containing Skillsets operations. */ export class SkillsetsImpl implements Skillsets { @@ -107,23 +105,6 @@ export class SkillsetsImpl implements Skillsets { createOperationSpec, ); } - - /** - * Reset an existing skillset in a search service. - * @param skillsetName The name of the skillset to reset. - * @param skillNames The names of skills to reset. - * @param options The options parameters. - */ - resetSkills( - skillsetName: string, - skillNames: SkillNames, - options?: SkillsetsResetSkillsOptionalParams, - ): Promise { - return this.client.sendOperationRequest( - { skillsetName, skillNames, options }, - resetSkillsOperationSpec, - ); - } } // Operation Specifications const serializer = coreClient.createSerializer(Mappers, /* isXml */ false); @@ -143,11 +124,7 @@ const createOrUpdateOperationSpec: coreClient.OperationSpec = { }, }, requestBody: Parameters.skillset, - queryParameters: [ - Parameters.apiVersion, - Parameters.skipIndexerResetRequirementForCache, - Parameters.disableCacheReprocessingChangeDetection, - ], + queryParameters: [Parameters.apiVersion], urlParameters: [Parameters.endpoint, Parameters.skillsetName], headerParameters: [ Parameters.contentType, @@ -228,19 +205,3 @@ const createOperationSpec: coreClient.OperationSpec = { mediaType: "json", serializer, }; -const resetSkillsOperationSpec: coreClient.OperationSpec = { - path: "/skillsets('{skillsetName}')/search.resetskills", - httpMethod: "POST", - responses: { - 204: {}, - default: { - bodyMapper: Mappers.ErrorResponse, - }, - }, - requestBody: Parameters.skillNames, - queryParameters: [Parameters.apiVersion], - urlParameters: [Parameters.endpoint, Parameters.skillsetName], - headerParameters: [Parameters.contentType, Parameters.accept], - mediaType: "json", - serializer, -}; diff --git a/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts b/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts index 8bd281f6aeda..afde7649c7d9 100644 --- a/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts +++ b/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ -import { SynonymMaps } from "../operationsInterfaces/index.js"; +import { SynonymMaps } from "../operationsInterfaces"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers.js"; -import * as Parameters from "../models/parameters.js"; -import { SearchServiceClient } from "../searchServiceClient.js"; +import * as Mappers from "../models/mappers"; +import * as Parameters from "../models/parameters"; +import { SearchServiceClient } from "../searchServiceClient"; import { SynonymMap, SynonymMapsCreateOrUpdateOptionalParams, @@ -22,7 +22,7 @@ import { SynonymMapsListResponse, SynonymMapsCreateOptionalParams, SynonymMapsCreateResponse, -} from "../models/index.js"; +} from "../models"; /** Class containing SynonymMaps operations. */ export class SynonymMapsImpl implements SynonymMaps { diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts deleted file mode 100644 index 1a0751eb6ed4..000000000000 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) Microsoft Corporation. - * Licensed under the MIT License. - * - * Code generated by Microsoft (R) AutoRest Code Generator. - * Changes may cause incorrect behavior and will be lost if the code is regenerated. - */ - -import { - SearchAlias, - AliasesCreateOptionalParams, - AliasesCreateResponse, - AliasesListOptionalParams, - AliasesListResponse, - AliasesCreateOrUpdateOptionalParams, - AliasesCreateOrUpdateResponse, - AliasesDeleteOptionalParams, - AliasesGetOptionalParams, - AliasesGetResponse, -} from "../models/index.js"; - -/** Interface representing a Aliases. */ -export interface Aliases { - /** - * Creates a new search alias. - * @param alias The definition of the alias to create. - * @param options The options parameters. - */ - create( - alias: SearchAlias, - options?: AliasesCreateOptionalParams, - ): Promise; - /** - * Lists all aliases available for a search service. - * @param options The options parameters. - */ - list(options?: AliasesListOptionalParams): Promise; - /** - * Creates a new search alias or updates an alias if it already exists. - * @param aliasName The definition of the alias to create or update. - * @param alias The definition of the alias to create or update. - * @param options The options parameters. - */ - createOrUpdate( - aliasName: string, - alias: SearchAlias, - options?: AliasesCreateOrUpdateOptionalParams, - ): Promise; - /** - * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no - * recovery option. The mapped index is untouched by this operation. - * @param aliasName The name of the alias to delete. - * @param options The options parameters. - */ - delete( - aliasName: string, - options?: AliasesDeleteOptionalParams, - ): Promise; - /** - * Retrieves an alias definition. - * @param aliasName The name of the alias to retrieve. - * @param options The options parameters. 
- */ - get( - aliasName: string, - options?: AliasesGetOptionalParams, - ): Promise; -} diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts index 36a165a3974f..801ff187e26a 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts @@ -17,7 +17,7 @@ import { DataSourcesListResponse, DataSourcesCreateOptionalParams, DataSourcesCreateResponse, -} from "../models/index.js"; +} from "../models"; /** Interface representing a DataSources. */ export interface DataSources { diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts index 4022a60095f0..896ae33eded4 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts @@ -6,9 +6,8 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -export * from "./dataSources.js"; -export * from "./indexers.js"; -export * from "./skillsets.js"; -export * from "./synonymMaps.js"; -export * from "./indexes.js"; -export * from "./aliases.js"; +export * from "./dataSources"; +export * from "./indexers"; +export * from "./skillsets"; +export * from "./synonymMaps"; +export * from "./indexes"; diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts index 3b9a09dc78aa..695f6815f47e 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts @@ -8,7 +8,6 @@ import { IndexersResetOptionalParams, - IndexersResetDocsOptionalParams, IndexersRunOptionalParams, SearchIndexer, IndexersCreateOrUpdateOptionalParams, @@ -22,7 +21,7 @@ import { IndexersCreateResponse, IndexersGetStatusOptionalParams, IndexersGetStatusResponse, -} from "../models/index.js"; +} from "../models"; /** Interface representing a Indexers. */ export interface Indexers { @@ -35,15 +34,6 @@ export interface Indexers { indexerName: string, options?: IndexersResetOptionalParams, ): Promise; - /** - * Resets specific documents in the datasource to be selectively re-ingested by the indexer. - * @param indexerName The name of the indexer to reset documents for. - * @param options The options parameters. - */ - resetDocs( - indexerName: string, - options?: IndexersResetDocsOptionalParams, - ): Promise; /** * Runs an indexer on-demand. * @param indexerName The name of the indexer to run. diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts index 97c64eb3214c..dc88a3a325d4 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts @@ -22,7 +22,7 @@ import { AnalyzeRequest, IndexesAnalyzeOptionalParams, IndexesAnalyzeResponse, -} from "../models/index.js"; +} from "../models"; /** Interface representing a Indexes. 
*/ export interface Indexes { diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts index 2287fd215c5c..04ba69fa9b27 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts @@ -17,9 +17,7 @@ import { SkillsetsListResponse, SkillsetsCreateOptionalParams, SkillsetsCreateResponse, - SkillNames, - SkillsetsResetSkillsOptionalParams, -} from "../models/index.js"; +} from "../models"; /** Interface representing a Skillsets. */ export interface Skillsets { @@ -66,15 +64,4 @@ export interface Skillsets { skillset: SearchIndexerSkillset, options?: SkillsetsCreateOptionalParams, ): Promise; - /** - * Reset an existing skillset in a search service. - * @param skillsetName The name of the skillset to reset. - * @param skillNames The names of skills to reset. - * @param options The options parameters. - */ - resetSkills( - skillsetName: string, - skillNames: SkillNames, - options?: SkillsetsResetSkillsOptionalParams, - ): Promise; } diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts index 12eefed6a043..b26e83a49d74 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts @@ -17,7 +17,7 @@ import { SynonymMapsListResponse, SynonymMapsCreateOptionalParams, SynonymMapsCreateResponse, -} from "../models/index.js"; +} from "../models"; /** Interface representing a SynonymMaps. */ export interface SynonymMaps { diff --git a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts b/sdk/search/search-documents/src/generated/service/searchServiceClient.ts index 6e24506ad82c..c21faa69bd76 100644 --- a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts +++ b/sdk/search/search-documents/src/generated/service/searchServiceClient.ts @@ -19,29 +19,27 @@ import { SkillsetsImpl, SynonymMapsImpl, IndexesImpl, - AliasesImpl, -} from "./operations/index.js"; +} from "./operations"; import { DataSources, Indexers, Skillsets, SynonymMaps, Indexes, - Aliases, -} from "./operationsInterfaces/index.js"; -import * as Parameters from "./models/parameters.js"; -import * as Mappers from "./models/mappers.js"; +} from "./operationsInterfaces"; +import * as Parameters from "./models/parameters"; +import * as Mappers from "./models/mappers"; import { - ApiVersion20241101Preview, + ApiVersion20240701, SearchServiceClientOptionalParams, GetServiceStatisticsOptionalParams, GetServiceStatisticsResponse, -} from "./models/index.js"; +} from "./models"; /** @internal */ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { endpoint: string; - apiVersion: ApiVersion20241101Preview; + apiVersion: ApiVersion20240701; /** * Initializes a new instance of the SearchServiceClient class. 
@@ -51,7 +49,7 @@ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { */ constructor( endpoint: string, - apiVersion: ApiVersion20241101Preview, + apiVersion: ApiVersion20240701, options?: SearchServiceClientOptionalParams, ) { if (endpoint === undefined) { @@ -69,7 +67,7 @@ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { requestContentType: "application/json; charset=utf-8", }; - const packageDetails = `azsdk-js-search-documents/12.2.0-beta.2`; + const packageDetails = `azsdk-js-search-documents/12.1.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix ? `${options.userAgentOptions.userAgentPrefix} ${packageDetails}` @@ -92,7 +90,6 @@ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { this.skillsets = new SkillsetsImpl(this); this.synonymMaps = new SynonymMapsImpl(this); this.indexes = new IndexesImpl(this); - this.aliases = new AliasesImpl(this); this.addCustomApiVersionPolicy(apiVersion); } @@ -142,7 +139,6 @@ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { skillsets: Skillsets; synonymMaps: SynonymMaps; indexes: Indexes; - aliases: Aliases; } // Operation Specifications const serializer = coreClient.createSerializer(Mappers, /* isXml */ false); diff --git a/sdk/search/search-documents/src/geographyPoint.ts b/sdk/search/search-documents/src/geographyPoint.ts index dd3f8b8ac916..6fc07ee7d14a 100644 --- a/sdk/search/search-documents/src/geographyPoint.ts +++ b/sdk/search/search-documents/src/geographyPoint.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. const WorldGeodeticSystem1984 = "EPSG:4326"; // See https://epsg.io/4326 diff --git a/sdk/search/search-documents/src/index.ts b/sdk/search/search-documents/src/index.ts index aa30f09c94d6..175ecfa37700 100644 --- a/sdk/search/search-documents/src/index.ts +++ b/sdk/search/search-documents/src/index.ts @@ -1,58 +1,32 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. 
export { AzureKeyCredential } from "@azure/core-auth"; export { AutocompleteItem, AutocompleteMode, AutocompleteResult, - DebugInfo, FacetResult, - HybridCountAndFacetMode, - HybridSearch as HybridSearchOptions, IndexActionType, IndexDocumentsResult, IndexingResult, - KnownHybridCountAndFacetMode, - KnownQueryDebugMode, - KnownQueryLanguage, - KnownQuerySpellerType as KnownQuerySpeller, KnownSemanticErrorMode, KnownSemanticErrorReason, - KnownSemanticFieldState, - KnownSemanticQueryRewritesResultType, KnownSemanticSearchResultsType, KnownVectorFilterMode, KnownVectorQueryKind, - KnownVectorThresholdKind, QueryAnswerResult, QueryCaptionResult, - QueryDebugMode, - QueryLanguage, - QueryResultDocumentRerankerInput, - QueryResultDocumentSubscores, - QueryRewritesDebugInfo, - QueryRewritesValuesDebugInfo, - QuerySpellerType as QuerySpeller, QueryType, ScoringStatistics, SearchMode, - SemanticFieldState, - SemanticQueryRewritesResultType, - SingleVectorFieldResult, - TextResult, - VectorsDebugInfo, -} from "./generated/data/models/index.js"; +} from "./generated/data/models"; export { - AIServicesAccountKey, - AIStudioModelCatalogName, AnalyzedTokenInfo, AnalyzeResult, AsciiFoldingTokenFilter, AzureActiveDirectoryApplicationCredentials, - AzureMachineLearningSkill, AzureOpenAIModelName, - AzureOpenAITokenizerParameters, BinaryQuantizationCompression, BM25Similarity, CharFilter as BaseCharFilter, @@ -68,7 +42,6 @@ export { CorsOptions, CustomEntity, CustomEntityAlias, - CustomNormalizer, DataChangeDetectionPolicy as BaseDataChangeDetectionPolicy, DataDeletionDetectionPolicy as BaseDataDeletionDetectionPolicy, DefaultCognitiveServicesAccount, @@ -76,8 +49,6 @@ export { DistanceScoringFunction, DistanceScoringParameters, DocumentExtractionSkill, - DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, - DocumentIntelligenceLayoutSkillOutputMode, EdgeNGramTokenFilterSide, EdgeNGramTokenizer, ElisionTokenFilter, @@ -90,38 +61,27 @@ export { HighWaterMarkChangeDetectionPolicy, IndexerExecutionResult, IndexerExecutionStatus, - IndexerExecutionStatusDetail, - IndexerState, IndexerStatus, - IndexingMode, IndexingSchedule, IndexProjectionMode, InputFieldMappingEntry, KeepTokenFilter, KeywordMarkerTokenFilter, - KnownAIStudioModelCatalogName, KnownAzureOpenAIModelName, KnownBlobIndexerDataToExtract, KnownBlobIndexerImageAction, KnownBlobIndexerParsingMode, KnownBlobIndexerPDFTextRotationAlgorithm, + KnownCharFilterName as KnownCharFilterNames, KnownCustomEntityLookupSkillLanguage, - KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth, - KnownDocumentIntelligenceLayoutSkillOutputMode, KnownEntityCategory, KnownEntityRecognitionSkillLanguage, KnownImageAnalysisSkillLanguage, KnownImageDetail, KnownIndexerExecutionEnvironment, - KnownIndexerExecutionStatusDetail, - KnownIndexingMode, KnownIndexProjectionMode, KnownKeyPhraseExtractionSkillLanguage, - KnownLexicalAnalyzerName, - KnownLexicalNormalizerName, - KnownLexicalNormalizerName as KnownNormalizerNames, - KnownMarkdownHeaderDepth, - KnownMarkdownParsingSubmode, + KnownLexicalTokenizerName as KnownTokenizerNames, KnownOcrLineEnding, KnownOcrSkillLanguage, KnownPIIDetectionSkillMaskingMode, @@ -129,16 +89,14 @@ export { KnownSearchFieldDataType, KnownSearchIndexerDataSourceType, KnownSentimentSkillLanguage, - KnownSplitSkillEncoderModelName, KnownSplitSkillLanguage, - KnownSplitSkillUnit, KnownTextSplitMode, KnownTextTranslationSkillLanguage, + KnownTokenFilterName as KnownTokenFilterNames, KnownVectorEncodingFormat, KnownVectorSearchAlgorithmKind, 
KnownVectorSearchAlgorithmMetric, KnownVectorSearchCompressionKind, - KnownVectorSearchCompressionRescoreStorageMethod, KnownVectorSearchCompressionTarget, KnownVectorSearchVectorizerKind, KnownVisualFeature, @@ -146,8 +104,6 @@ export { LengthTokenFilter, LexicalAnalyzer as BaseLexicalAnalyzer, LexicalAnalyzerName, - LexicalNormalizer as BaseLexicalNormalizer, - LexicalNormalizerName, LexicalTokenizer as BaseLexicalTokenizer, LexicalTokenizerName, LimitTokenFilter, @@ -155,14 +111,11 @@ export { MagnitudeScoringFunction, MagnitudeScoringParameters, MappingCharFilter, - MarkdownHeaderDepth, - MarkdownParsingSubmode, MergeSkill, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, MicrosoftStemmingTokenizerLanguage, MicrosoftTokenizerLanguage, - NativeBlobSoftDeleteDeletionDetectionPolicy, NGramTokenizer, OcrLineEnding, OutputFieldMappingEntry, @@ -172,14 +125,12 @@ export { PatternReplaceTokenFilter, PhoneticEncoder, PhoneticTokenFilter, - RescoringOptions, ResourceCounter, ScalarQuantizationCompression, ScalarQuantizationParameters, ScoringFunction as BaseScoringFunction, ScoringFunctionAggregation, ScoringFunctionInterpolation, - SearchAlias, SearchIndexerDataContainer, SearchIndexerDataIdentity as BaseSearchIndexerDataIdentity, SearchIndexerDataNoneIdentity, @@ -209,8 +160,6 @@ export { SnowballTokenFilter, SnowballTokenFilterLanguage, SoftDeleteColumnDeletionDetectionPolicy, - SplitSkillEncoderModelName, - SplitSkillUnit, SqlIntegratedChangeTrackingPolicy, StemmerOverrideTokenFilter, StemmerTokenFilter, @@ -232,28 +181,24 @@ export { VectorEncodingFormat, VectorSearchCompression as BaseVectorSearchCompression, VectorSearchCompressionKind, - VectorSearchCompressionRescoreStorageMethod, VectorSearchCompressionTarget, VectorSearchProfile, VectorSearchVectorizerKind, WordDelimiterTokenFilter, -} from "./generated/service/models/index.js"; -export { default as GeographyPoint } from "./geographyPoint.js"; -export { IndexDocumentsBatch } from "./indexDocumentsBatch.js"; +} from "./generated/service/models"; +export { default as GeographyPoint } from "./geographyPoint"; +export { IndexDocumentsBatch } from "./indexDocumentsBatch"; export { AutocompleteOptions, AutocompleteRequest, BaseSearchRequestOptions, BaseVectorQuery, - BaseVectorThreshold, CountDocumentsOptions, DeleteDocumentsOptions, - DocumentDebugInfo, ExcludedODataTypes, ExtractDocumentKey, ExtractiveQueryAnswer, ExtractiveQueryCaption, - GenerativeQueryRewrites, GetDocumentOptions, IndexDocumentsAction, IndexDocumentsOptions, @@ -263,8 +208,6 @@ export { NarrowedModel, QueryAnswer, QueryCaption, - QueryResultDocumentSemanticField, - QueryRewrites, SearchDocumentsPageResult, SearchDocumentsResult, SearchDocumentsResultBase, @@ -281,10 +224,8 @@ export { SearchRequestOptions, SearchRequestQueryTypeOptions, SearchResult, - SearchScoreThreshold, SelectArray, SelectFields, - SemanticDebugInfo, SemanticErrorMode, SemanticErrorReason, SemanticSearchOptions, @@ -297,41 +238,30 @@ export { UnionToIntersection, UploadDocumentsOptions, VectorFilterMode, - VectorizableImageBinaryQuery, - VectorizableImageUrlQuery, VectorizableTextQuery, VectorizedQuery, VectorQuery, VectorQueryKind, VectorSearchOptions, - VectorSimilarityThreshold, - VectorThreshold, -} from "./indexModels.js"; -export { odata } from "./odata.js"; -export { KnownSearchAudience } from "./searchAudience.js"; -export { SearchClient, SearchClientOptions } from "./searchClient.js"; -export { SearchIndexClient, SearchIndexClientOptions } from "./searchIndexClient.js"; 
-export { SearchIndexerClient, SearchIndexerClientOptions } from "./searchIndexerClient.js"; +} from "./indexModels"; +export { odata } from "./odata"; +export { KnownSearchAudience } from "./searchAudience"; +export { SearchClient, SearchClientOptions } from "./searchClient"; +export { SearchIndexClient, SearchIndexClientOptions } from "./searchIndexClient"; +export { SearchIndexerClient, SearchIndexerClientOptions } from "./searchIndexerClient"; export { DEFAULT_BATCH_SIZE, DEFAULT_FLUSH_WINDOW, DEFAULT_RETRY_COUNT, IndexDocumentsClient, SearchIndexingBufferedSender, -} from "./searchIndexingBufferedSender.js"; +} from "./searchIndexingBufferedSender"; export { - AIServicesAccountIdentity, - AIServicesVisionParameters, - AIServicesVisionVectorizer, - AliasIterator, AnalyzeRequest, AnalyzeTextOptions, - AzureMachineLearningVectorizer, - AzureMachineLearningVectorizerParameters, AzureOpenAIEmbeddingSkill, AzureOpenAIParameters, AzureOpenAIVectorizer, - BaseAzureMachineLearningVectorizerParameters, BaseVectorSearchAlgorithmConfiguration, BaseVectorSearchVectorizer, BlobIndexerDataToExtract, @@ -342,11 +272,9 @@ export { CognitiveServicesAccount, ComplexDataType, ComplexField, - CreateAliasOptions, CreateDataSourceConnectionOptions, CreateIndexerOptions, CreateIndexOptions, - CreateOrUpdateAliasOptions, CreateorUpdateDataSourceConnectionOptions, CreateorUpdateIndexerOptions, CreateOrUpdateIndexOptions, @@ -359,20 +287,17 @@ export { CustomEntityLookupSkillLanguage, DataChangeDetectionPolicy, DataDeletionDetectionPolicy, - DeleteAliasOptions, DeleteDataSourceConnectionOptions, DeleteIndexerOptions, DeleteIndexOptions, DeleteSkillsetOptions, DeleteSynonymMapOptions, - DocumentIntelligenceLayoutSkill, EdgeNGramTokenFilter, EntityCategory, EntityRecognitionSkill, EntityRecognitionSkillLanguage, ExhaustiveKnnAlgorithmConfiguration, ExhaustiveKnnParameters, - GetAliasOptions, GetDataSourceConnectionOptions, GetIndexerOptions, GetIndexerStatusOptions, @@ -391,18 +316,12 @@ export { IndexingParametersConfiguration, IndexIterator, IndexNameIterator, - KeyAuthAzureMachineLearningVectorizerParameters, KeyPhraseExtractionSkill, KeyPhraseExtractionSkillLanguage, KeywordTokenizer, KnownAnalyzerNames, - KnownCharFilterNames, - KnownTokenFilterNames, - KnownTokenizerNames, LexicalAnalyzer, - LexicalNormalizer, LexicalTokenizer, - ListAliasesOptions, ListDataSourceConnectionsOptions, ListIndexersOptions, ListIndexesOptions, @@ -410,7 +329,6 @@ export { ListSynonymMapsOptions, LuceneStandardTokenizer, NGramTokenFilter, - NoAuthAzureMachineLearningVectorizerParameters, OcrSkill, OcrSkillLanguage, PatternAnalyzer, @@ -418,18 +336,14 @@ export { PIIDetectionSkill, PIIDetectionSkillMaskingMode, RegexFlags, - ResetDocumentsOptions, ResetIndexerOptions, - ResetSkillsOptions, RunIndexerOptions, ScoringFunction, ScoringProfile, SearchField, SearchFieldDataType, SearchIndex, - SearchIndexAlias, SearchIndexer, - SearchIndexerCache, SearchIndexerDataIdentity, SearchIndexerDataSourceConnection, SearchIndexerDataSourceType, @@ -452,7 +366,6 @@ export { TextSplitMode, TextTranslationSkill, TextTranslationSkillLanguage, - TokenAuthAzureMachineLearningVectorizerParameters, TokenFilter, VectorSearch, VectorSearchAlgorithmConfiguration, @@ -460,10 +373,9 @@ export { VectorSearchAlgorithmMetric, VectorSearchCompression, VectorSearchVectorizer, - VisionVectorizeSkill, VisualFeature, WebApiParameters, WebApiSkill, WebApiVectorizer, -} from "./serviceModels.js"; -export { createSynonymMapFromFile } from "./synonymMapHelper.js"; +} 
from "./serviceModels"; +export { createSynonymMapFromFile } from "./synonymMapHelper"; diff --git a/sdk/search/search-documents/src/indexDocumentsBatch.ts b/sdk/search/search-documents/src/indexDocumentsBatch.ts index f3b44a88ab7e..1122943bb701 100644 --- a/sdk/search/search-documents/src/indexDocumentsBatch.ts +++ b/sdk/search/search-documents/src/indexDocumentsBatch.ts @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import type { IndexDocumentsAction } from "./indexModels.js"; +import { IndexDocumentsAction } from "./indexModels"; /** * Class used to perform batch operations @@ -34,7 +34,7 @@ export class IndexDocumentsBatch { /** * Update a set of documents in the index. - * For more details about how merging works, see https://learn.microsoft.com/rest/api/searchservice/AddUpdate-or-Delete-Documents + * For more details about how merging works, see https://docs.microsoft.com/en-us/rest/api/searchservice/AddUpdate-or-Delete-Documents * @param documents - The updated documents. */ public merge(documents: TModel[]): void { @@ -50,7 +50,7 @@ export class IndexDocumentsBatch { /** * Update a set of documents in the index or uploads them if they don't exist. - * For more details about how merging works, see https://learn.microsoft.com/rest/api/searchservice/AddUpdate-or-Delete-Documents + * For more details about how merging works, see https://docs.microsoft.com/en-us/rest/api/searchservice/AddUpdate-or-Delete-Documents * @param documents - The new/updated documents. */ public mergeOrUpload(documents: TModel[]): void { diff --git a/sdk/search/search-documents/src/indexModels.ts b/sdk/search/search-documents/src/indexModels.ts index 587b9c83d87d..529aa660011e 100644 --- a/sdk/search/search-documents/src/indexModels.ts +++ b/sdk/search/search-documents/src/indexModels.ts @@ -1,13 +1,11 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import type { OperationOptions } from "@azure/core-client"; -import type { PagedAsyncIterableIterator } from "@azure/core-paging"; -import type { +import { OperationOptions } from "@azure/core-client"; +import { PagedAsyncIterableIterator } from "@azure/core-paging"; +import { AutocompleteMode, - DebugInfo, FacetResult, - HybridSearch, IndexActionType, KnownSemanticErrorMode, KnownSemanticErrorReason, @@ -16,18 +14,11 @@ import type { KnownVectorQueryKind, QueryAnswerResult, QueryCaptionResult, - QueryDebugMode, - QueryLanguage, - QueryResultDocumentRerankerInput, - QuerySpellerType as QuerySpeller, QueryType, ScoringStatistics, SearchMode, - SemanticFieldState, - SemanticQueryRewritesResultType, - VectorsDebugInfo, -} from "./generated/data/models/index.js"; -import type GeographyPoint from "./geographyPoint.js"; +} from "./generated/data/models"; +import GeographyPoint from "./geographyPoint"; /** * Options for performing the count operation on the index. @@ -190,9 +181,7 @@ export type SearchIterator< /** The query parameters for vector and hybrid search queries. */ export type VectorQuery = | VectorizedQuery - | VectorizableTextQuery - | VectorizableImageUrlQuery - | VectorizableImageBinaryQuery; + | VectorizableTextQuery; /** The query parameters for vector and hybrid search queries. */ export interface BaseVectorQuery { @@ -200,8 +189,6 @@ export interface BaseVectorQuery { * ### Known values supported by the service * **vector**: Vector query where a raw vector value is provided. 
* **text**: Vector query where a text value that needs to be vectorized is provided. - * **imageUrl**: Vector query where an url that represents an image value that needs to be vectorized is provided. - * **imageBinary**: Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided. */ kind: VectorQueryKind; /** Number of nearest neighbors to return as top hits. */ @@ -223,11 +210,6 @@ export interface BaseVectorQuery { oversampling?: number; /** Relative weight of the vector query when compared to other vector query and/or the text query within the same search request. This value is used when combining the results of multiple ranking lists produced by the different vector queries and/or the results retrieved through the text query. The higher the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. */ weight?: number; - /** The threshold used for vector queries. Note this can only be set if all 'fields' use the same similarity metric. */ - threshold?: VectorThreshold; - /** The OData filter expression to apply to this specific vector query. If no filter expression is defined at the vector level, the expression defined in - * the top level filter parameter is used instead. */ - filterOverride?: string; } /** The query parameters to use for vector search when a raw vector value is provided. */ @@ -244,28 +226,6 @@ export interface VectorizableTextQuery extends BaseVector kind: "text"; /** The text to be vectorized to perform a vector search query. */ text: string; - /** - * Can be configured to let a generative model rewrite the query before sending it to be - * vectorized. - */ - queryRewrites?: QueryRewrites; -} - -/** The query parameters to use for vector search when an url that represents an image value that needs to be vectorized is provided. */ -export interface VectorizableImageUrlQuery extends BaseVectorQuery { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "imageUrl"; - /** The URL of an image to be vectorized to perform a vector search query. */ - url: string; -} - -/** The query parameters to use for vector search when a base 64 encoded binary of an image that needs to be vectorized is provided. */ -export interface VectorizableImageBinaryQuery - extends BaseVectorQuery { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "imageBinary"; - /** The base64 encoded binary of an image to be vectorized to perform a vector search query. */ - binaryImage: string; } /** @@ -343,14 +303,6 @@ export interface BaseSearchRequestOptions< * fielded search expression take precedence over any field names listed in this parameter. */ searchFields?: SearchFieldArray; - /** - * The language of the query. - */ - queryLanguage?: QueryLanguage; - /** - * Improve search recall by spell-correcting individual search query terms. - */ - speller?: QuerySpeller; /** * A value that specifies whether any or all of the search terms must be matched in order to * count the document as a match. Possible values include: 'any', 'all' @@ -392,9 +344,6 @@ export interface BaseSearchRequestOptions< * Defines options for vector search queries */ vectorSearchOptions?: VectorSearchOptions; - - /** The query parameters to configure hybrid search behaviors. 
*/ - hybridSearch?: HybridSearch; } /** @@ -445,12 +394,6 @@ export type SearchResult< readonly captions?: QueryCaptionResult[]; document: NarrowedModel; - - /** - * Contains debugging information that can be used to further explore your search results. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly documentDebugInfo?: DocumentDebugInfo; }; /** @@ -483,11 +426,6 @@ export interface SearchDocumentsResultBase { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly answers?: QueryAnswerResult[]; - /** - * Debug information that applies to the search results as a whole. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly debugInfo?: DebugInfo; /** * Reason that a partial response was returned for a semantic search request. * NOTE: This property will not be serialized. It can only be populated by the server. @@ -498,11 +436,6 @@ export interface SearchDocumentsResultBase { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly semanticSearchResultsType?: SemanticSearchResultsType; - /** - * Type of query rewrite that was used to retrieve documents. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly semanticQueryRewritesResultType?: SemanticQueryRewritesResultType; } /** @@ -908,25 +841,6 @@ export interface QueryResultDocumentSemanticField { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly name?: string; - /** - * The way the field was used for the semantic enrichment process (fully used, partially used, or unused) - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly state?: SemanticFieldState; -} - -/** Contains debugging information that can be used to further explore your search results. */ -export interface DocumentDebugInfo { - /** - * Contains debugging information specific to semantic search queries. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly semantic?: SemanticDebugInfo; - /** - * Contains debugging information specific to vector and hybrid search. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly vectors?: VectorsDebugInfo; } /** @@ -948,11 +862,6 @@ export interface SemanticDebugInfo { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly keywordFields?: QueryResultDocumentSemanticField[]; - /** - * The raw concatenated strings that were sent to the semantic enrichment process. - * NOTE: This property will not be serialized. It can only be populated by the server. - */ - readonly rerankerInput?: QueryResultDocumentRerankerInput; } /** @@ -969,10 +878,6 @@ export interface ExtractiveQueryAnswer { * The confidence threshold. Default threshold is 0.7 */ threshold?: number; - /** - * An optional upper bound on the number of characters in each answer. - */ - maxAnswerLength?: number; } /** @@ -986,10 +891,6 @@ export type QueryAnswer = ExtractiveQueryAnswer; export interface ExtractiveQueryCaption { captionType: "extractive"; highlight?: boolean; - /** - * An optional upper bound on the number of characters in each caption. - */ - maxCaptionLength?: number; } /** @@ -1029,36 +930,12 @@ export interface SemanticSearchOptions { * to 'None'. 
*/ captions?: QueryCaption; - /** - * When QueryRewrites is set to `generative`, the query terms are sent to a generate model which will - * produce 10 (default) rewrites to help increase the recall of the request. Defaults to `none`. - */ - queryRewrites?: QueryRewrites; /** * Allows setting a separate search query that will be solely used for semantic reranking, * semantic captions and semantic answers. Is useful for scenarios where there is a need to use * different queries between the base retrieval and ranking phase, and the L2 semantic phase. */ semanticQuery?: string; - /** - * The list of field names used for semantic search. - */ - semanticFields?: string[]; - /** - * Enables a debugging tool that can be used to further explore your search results. - */ - debugMode?: QueryDebugMode; -} - -/** Defines options for query rewrites. */ -export type QueryRewrites = GenerativeQueryRewrites; - -/** Generate alternative query terms to increase the recall of a search request. */ -export interface GenerativeQueryRewrites { - /** Polymorphic discriminator, which specifies the different types this object can be */ - rewritesType: "generative"; - /** The number of query rewrites to generate. Defaults to 10.*/ - count?: number; } /** @@ -1099,6 +976,7 @@ export interface SearchScoreThreshold extends BaseVectorThreshold { /** The threshold used for vector queries. */ export type VectorThreshold = VectorSimilarityThreshold | SearchScoreThreshold; + export type SemanticErrorMode = `${KnownSemanticErrorMode}`; export type SemanticErrorReason = `${KnownSemanticErrorReason}`; export type SemanticSearchResultsType = `${KnownSemanticSearchResultsType}`; diff --git a/sdk/search/search-documents/src/logger.ts b/sdk/search/search-documents/src/logger.ts index f3a939cdcde7..75335573005d 100644 --- a/sdk/search/search-documents/src/logger.ts +++ b/sdk/search/search-documents/src/logger.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. import { createClientLogger } from "@azure/logger"; diff --git a/sdk/search/search-documents/src/odata.ts b/sdk/search/search-documents/src/odata.ts index 11ef22fd7f9f..e8f8273b46cd 100644 --- a/sdk/search/search-documents/src/odata.ts +++ b/sdk/search/search-documents/src/odata.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. function formatNullAndUndefined(input: unknown): string | unknown { if (input === null || input === undefined) { @@ -26,14 +26,12 @@ function escapeQuotesIfString(input: unknown, previous: string): string | unknow /** * Escapes an odata filter expression to avoid errors with quoting string literals. 
* Example usage: - * ```ts snippet:ReadmeSampleOdataUsage - * import { odata } from "@azure/search-documents"; - * + * ```ts * const baseRateMax = 200; * const ratingMin = 4; * const filter = odata`Rooms/any(room: room/BaseRate lt ${baseRateMax}) and Rating ge ${ratingMin}`; * ``` - * For more information on supported syntax see: https://learn.microsoft.com/azure/search/search-query-odata-filter + * For more information on supported syntax see: https://docs.microsoft.com/en-us/azure/search/search-query-odata-filter * @param strings - Array of strings for the expression * @param values - Array of values for the expression */ diff --git a/sdk/search/search-documents/src/odataMetadataPolicy.ts b/sdk/search/search-documents/src/odataMetadataPolicy.ts index b01ddb7ca951..c6b873ee83c1 100644 --- a/sdk/search/search-documents/src/odataMetadataPolicy.ts +++ b/sdk/search/search-documents/src/odataMetadataPolicy.ts @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import type { +import { PipelinePolicy, PipelineRequest, PipelineResponse, diff --git a/sdk/search/search-documents/src/searchApiKeyCredentialPolicy.ts b/sdk/search/search-documents/src/searchApiKeyCredentialPolicy.ts index ce3b4a2b832d..6e3571a4dcab 100644 --- a/sdk/search/search-documents/src/searchApiKeyCredentialPolicy.ts +++ b/sdk/search/search-documents/src/searchApiKeyCredentialPolicy.ts @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import type { KeyCredential } from "@azure/core-auth"; -import type { +import { KeyCredential } from "@azure/core-auth"; +import { PipelinePolicy, PipelineRequest, PipelineResponse, diff --git a/sdk/search/search-documents/src/searchAudience.ts b/sdk/search/search-documents/src/searchAudience.ts index ebc76f688486..c7eb679f1197 100644 --- a/sdk/search/search-documents/src/searchAudience.ts +++ b/sdk/search/search-documents/src/searchAudience.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. /** * Known values for Search Audience diff --git a/sdk/search/search-documents/src/searchClient.ts b/sdk/search/search-documents/src/searchClient.ts index 9332c5414097..f3a1e90697ab 100644 --- a/sdk/search/search-documents/src/searchClient.ts +++ b/sdk/search/search-documents/src/searchClient.ts @@ -1,29 +1,26 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. 
/// -import type { KeyCredential, TokenCredential } from "@azure/core-auth"; -import { isTokenCredential } from "@azure/core-auth"; -import type { InternalClientPipelineOptions } from "@azure/core-client"; -import type { ExtendedCommonClientOptions } from "@azure/core-http-compat"; -import type { Pipeline } from "@azure/core-rest-pipeline"; +import { isTokenCredential, KeyCredential, TokenCredential } from "@azure/core-auth"; +import { InternalClientPipelineOptions } from "@azure/core-client"; +import { ExtendedCommonClientOptions } from "@azure/core-http-compat"; import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline"; -import { decode, encode } from "./base64.js"; -import type { +import { decode, encode } from "./base64"; +import { AutocompleteRequest, AutocompleteResult, IndexDocumentsResult, QueryAnswerType as BaseAnswers, QueryCaptionType as BaseCaptions, - QueryRewritesType as GeneratedQueryRewrites, SearchRequest as GeneratedSearchRequest, SuggestRequest, VectorQueryUnion as GeneratedVectorQuery, -} from "./generated/data/models/index.js"; -import { SearchClient as GeneratedClient } from "./generated/data/searchClient.js"; -import { IndexDocumentsBatch } from "./indexDocumentsBatch.js"; -import type { +} from "./generated/data/models"; +import { SearchClient as GeneratedClient } from "./generated/data/searchClient"; +import { IndexDocumentsBatch } from "./indexDocumentsBatch"; +import { AutocompleteOptions, CountDocumentsOptions, DeleteDocumentsOptions, @@ -35,7 +32,6 @@ import type { NarrowedModel, QueryAnswer, QueryCaption, - QueryRewrites, SearchDocumentsPageResult, SearchDocumentsResult, SearchFieldArray, @@ -50,15 +46,15 @@ import type { SuggestOptions, UploadDocumentsOptions, VectorQuery, -} from "./indexModels.js"; -import { logger } from "./logger.js"; -import { createOdataMetadataPolicy } from "./odataMetadataPolicy.js"; -import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy.js"; -import { KnownSearchAudience } from "./searchAudience.js"; -import type { IndexDocumentsClient } from "./searchIndexingBufferedSender.js"; -import { deserialize, serialize } from "./serialization.js"; -import * as utils from "./serviceUtils.js"; -import { createSpan } from "./tracing.js"; +} from "./indexModels"; +import { logger } from "./logger"; +import { createOdataMetadataPolicy } from "./odataMetadataPolicy"; +import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy"; +import { KnownSearchAudience } from "./searchAudience"; +import { IndexDocumentsClient } from "./searchIndexingBufferedSender"; +import { deserialize, serialize } from "./serialization"; +import * as utils from "./serviceUtils"; +import { createSpan } from "./tracing"; /** * Client options used to configure Cognitive Search API requests. @@ -119,41 +115,30 @@ export class SearchClient implements IndexDocumentsClient */ private readonly client: GeneratedClient; - /** - * A reference to the internal HTTP pipeline for use with raw requests - */ - public readonly pipeline: Pipeline; - /** * Creates an instance of SearchClient. 
* * Example usage: - * ```ts snippet:ReadmeSampleSearchClient - * import { SearchClient, AzureKeyCredential } from "@azure/search-documents"; + * ```ts + * const { SearchClient, AzureKeyCredential } = require("@azure/search-documents"); * - * const searchClient = new SearchClient( + * const client = new SearchClient( * "", * "", - * new AzureKeyCredential(""), + * new AzureKeyCredential("") * ); * ``` * * Optionally, the type of the model can be used to enable strong typing and type hints: - * ```ts snippet:ReadmeSampleSearchClientWithModel - * import { SearchClient, AzureKeyCredential } from "@azure/search-documents"; - * + * ```ts * type TModel = { * keyName: string; * field1?: string | null; - * field2?: { - * anotherField?: string | null; - * } | null; + * field2?: { anotherField?: string | null } | null; * }; * - * const searchClient = new SearchClient( - * "", - * "", - * new AzureKeyCredential(""), + * const client = new SearchClient( + * ... * ); * ``` * @@ -203,7 +188,6 @@ export class SearchClient implements IndexDocumentsClient this.serviceVersion, internalClientPipelineOptions, ); - this.pipeline = this.client.pipeline; if (isTokenCredential(credential)) { const scope: string = options.audience @@ -224,7 +208,6 @@ export class SearchClient implements IndexDocumentsClient * Retrieves the number of documents in the index. * @param options - Options to the count operation. */ - // eslint-disable-next-line @azure/azure-sdk/ts-naming-options public async getDocumentsCount(options: CountDocumentsOptions = {}): Promise { const { span, updatedOptions } = createSpan("SearchClient-getDocumentsCount", options); try { @@ -258,27 +241,31 @@ export class SearchClient implements IndexDocumentsClient * @param suggesterName - The name of the suggester as specified in the suggesters collection that's part of the index definition. * @param options - Options to the autocomplete operation. * @example - * ```ts snippet:ReadmeSampleAutocomplete - * import { SearchClient, AzureKeyCredential, SearchFieldArray } from "@azure/search-documents"; + * ```ts + * import { + * AzureKeyCredential, + * SearchClient, + * SearchFieldArray, + * } from "@azure/search-documents"; * * type TModel = { * key: string; - * azure?: { - * sdk: string | null; - * } | null; + * azure?: { sdk: string | null } | null; * }; * * const client = new SearchClient( * "endpoint.azure", * "indexName", - * new AzureKeyCredential("key"), + * new AzureKeyCredential("key") * ); * * const searchFields: SearchFieldArray = ["azure/sdk"]; * - * const autocompleteResult = await client.autocomplete("searchText", "suggesterName", { - * searchFields, - * }); + * const autocompleteResult = await client.autocomplete( + * "searchText", + * "suggesterName", + * { searchFields } + * ); * ``` */ public async autocomplete( @@ -330,20 +317,11 @@ export class SearchClient implements IndexDocumentsClient select, vectorSearchOptions, semanticSearchOptions, - hybridSearch, ...restOptions } = options as typeof options & { queryType: "semantic" }; - const { - semanticFields, - configurationName, - errorMode, - answers, - captions, - debugMode, - queryRewrites, - ...restSemanticOptions - } = semanticSearchOptions ?? {}; + const { configurationName, errorMode, answers, captions, ...restSemanticOptions } = + semanticSearchOptions ?? {}; const { queries, filterMode, ...restVectorOptions } = vectorSearchOptions ?? 
{}; const fullOptions: GeneratedSearchRequest = { @@ -352,7 +330,6 @@ export class SearchClient implements IndexDocumentsClient ...restOptions, ...nextPageParameters, searchFields: this.convertSearchFields(searchFields), - semanticFields: this.convertSemanticFields(semanticFields), select: this.convertSelect(select) || "*", orderBy: this.convertOrderBy(orderBy), includeTotalResultCount: includeTotalCount, @@ -361,10 +338,7 @@ export class SearchClient implements IndexDocumentsClient captions: this.convertQueryCaptions(captions), semanticErrorHandling: errorMode, semanticConfigurationName: configurationName, - debug: debugMode, - queryRewrites: this.convertQueryRewrites(queryRewrites), vectorFilterMode: filterMode, - hybridSearch: hybridSearch, }; const { span, updatedOptions } = createSpan("SearchClient-searchDocuments", options); @@ -482,20 +456,22 @@ export class SearchClient implements IndexDocumentsClient * @param searchText - Text to search * @param options - Options for the search operation. * @example - * ```ts snippet:ReadmeSampleSearchTModel - * import { SearchClient, AzureKeyCredential, SearchFieldArray } from "@azure/search-documents"; + * ```ts + * import { + * AzureKeyCredential, + * SearchClient, + * SearchFieldArray, + * } from "@azure/search-documents"; * * type TModel = { * key: string; - * azure?: { - * sdk: string | null; - * } | null; + * azure?: { sdk: string | null } | null; * }; * * const client = new SearchClient( * "endpoint.azure", * "indexName", - * new AzureKeyCredential("key"), + * new AzureKeyCredential("key") * ); * * const select = ["azure/sdk"] as const; @@ -538,20 +514,22 @@ export class SearchClient implements IndexDocumentsClient * @param suggesterName - The name of the suggester as specified in the suggesters collection that's part of the index definition. * @param options - Options for the suggest operation * @example - * ```ts snippet:ReadmeSampleSuggest - * import { SearchClient, AzureKeyCredential, SearchFieldArray } from "@azure/search-documents"; + * ```ts + * import { + * AzureKeyCredential, + * SearchClient, + * SearchFieldArray, + * } from "@azure/search-documents"; * * type TModel = { * key: string; - * azure?: { - * sdk: string | null; - * } | null; + * azure?: { sdk: string | null } | null; * }; * * const client = new SearchClient( * "endpoint.azure", * "indexName", - * new AzureKeyCredential("key"), + * new AzureKeyCredential("key") * ); * * const select = ["azure/sdk"] as const; @@ -641,7 +619,7 @@ export class SearchClient implements IndexDocumentsClient * This operation may partially succeed and not all document operations will * be reflected in the index. If you would like to treat this as an exception, * set the `throwOnAnyFailure` option to true. - * For more details about how merging works, see: https://learn.microsoft.com/rest/api/searchservice/AddUpdate-or-Delete-Documents + * For more details about how merging works, see: https://docs.microsoft.com/en-us/rest/api/searchservice/AddUpdate-or-Delete-Documents * @param batch - An array of actions to perform on the index. * @param options - Additional options. */ @@ -709,7 +687,7 @@ export class SearchClient implements IndexDocumentsClient /** * Update a set of documents in the index. 
- * For more details about how merging works, see https://learn.microsoft.com/rest/api/searchservice/AddUpdate-or-Delete-Documents + * For more details about how merging works, see https://docs.microsoft.com/en-us/rest/api/searchservice/AddUpdate-or-Delete-Documents * @param documents - The updated documents. * @param options - Additional options. */ @@ -737,7 +715,7 @@ export class SearchClient implements IndexDocumentsClient /** * Update a set of documents in the index or upload them if they don't exist. - * For more details about how merging works, see https://learn.microsoft.com/rest/api/searchservice/AddUpdate-or-Delete-Documents + * For more details about how merging works, see https://docs.microsoft.com/en-us/rest/api/searchservice/AddUpdate-or-Delete-Documents * @param documents - The updated documents. * @param options - Additional options. */ @@ -879,13 +857,6 @@ export class SearchClient implements IndexDocumentsClient return searchFields; } - private convertSemanticFields(semanticFields?: string[]): string | undefined { - if (semanticFields) { - return semanticFields.join(","); - } - return semanticFields; - } - private convertOrderBy(orderBy?: string[]): string | undefined { if (orderBy) { return orderBy.join(","); @@ -899,7 +870,7 @@ export class SearchClient implements IndexDocumentsClient } const config = []; - const { answerType: output, count, threshold, maxAnswerLength } = answers; + const { answerType: output, count, threshold } = answers; if (count) { config.push(`count-${count}`); @@ -909,10 +880,6 @@ export class SearchClient implements IndexDocumentsClient config.push(`threshold-${threshold}`); } - if (maxAnswerLength) { - config.push(`maxcharlength-${maxAnswerLength}`); - } - if (config.length) { return output + `|${config.join(",")}`; } @@ -926,16 +893,12 @@ export class SearchClient implements IndexDocumentsClient } const config = []; - const { captionType: output, highlight, maxCaptionLength } = captions; + const { captionType: output, highlight } = captions; if (highlight !== undefined) { config.push(`highlight-${highlight}`); } - if (maxCaptionLength) { - config.push(`maxcharlength-${maxCaptionLength}`); - } - if (config.length) { return output + `|${config.join(",")}`; } @@ -944,43 +907,6 @@ export class SearchClient implements IndexDocumentsClient } private convertVectorQuery>(vectorQuery: T): GeneratedVectorQuery { - switch (vectorQuery.kind) { - case "text": { - const { fields, queryRewrites, ...restFields } = vectorQuery; - return { - ...restFields, - fields: this.convertVectorQueryFields(fields), - queryRewrites: this.convertQueryRewrites(queryRewrites), - }; - } - case "vector": - case "imageUrl": - case "imageBinary": { - return { ...vectorQuery, fields: this.convertVectorQueryFields(vectorQuery?.fields) }; - } - default: { - logger.warning("Unknown vector query kind; sending without serialization"); - return vectorQuery as any; - } - } - } - - private convertQueryRewrites(queryRewrites?: QueryRewrites): GeneratedQueryRewrites | undefined { - if (!queryRewrites) { - return queryRewrites; - } - - const { rewritesType: baseOutput } = queryRewrites; - switch (baseOutput) { - case "generative": { - const { count } = queryRewrites; - - const config = [...(count === undefined ? 
[] : [`count-${count}`])]; - if (config.length) return baseOutput + `|${config.join(",")}`; - return baseOutput; - } - default: - return baseOutput; - } + return { ...vectorQuery, fields: this.convertVectorQueryFields(vectorQuery?.fields) }; } } diff --git a/sdk/search/search-documents/src/searchIndexClient.ts b/sdk/search/search-documents/src/searchIndexClient.ts index 872e5dbae1c8..2b8a26c1bf82 100644 --- a/sdk/search/search-documents/src/searchIndexClient.ts +++ b/sdk/search/search-documents/src/searchIndexClient.ts @@ -1,52 +1,42 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. /// -import type { KeyCredential, TokenCredential } from "@azure/core-auth"; -import { isTokenCredential } from "@azure/core-auth"; -import type { InternalClientPipelineOptions } from "@azure/core-client"; -import type { ExtendedCommonClientOptions } from "@azure/core-http-compat"; -import type { Pipeline } from "@azure/core-rest-pipeline"; +import { isTokenCredential, KeyCredential, TokenCredential } from "@azure/core-auth"; +import { InternalClientPipelineOptions } from "@azure/core-client"; +import { ExtendedCommonClientOptions } from "@azure/core-http-compat"; import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline"; -import type { AnalyzeResult } from "./generated/service/models/index.js"; -import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient.js"; -import { logger } from "./logger.js"; -import { createOdataMetadataPolicy } from "./odataMetadataPolicy.js"; -import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy.js"; -import { KnownSearchAudience } from "./searchAudience.js"; -import type { SearchClientOptions as GetSearchClientOptions } from "./searchClient.js"; -import { SearchClient } from "./searchClient.js"; -import type { - AliasIterator, +import { AnalyzeResult } from "./generated/service/models"; +import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient"; +import { logger } from "./logger"; +import { createOdataMetadataPolicy } from "./odataMetadataPolicy"; +import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy"; +import { KnownSearchAudience } from "./searchAudience"; +import { SearchClient, SearchClientOptions as GetSearchClientOptions } from "./searchClient"; +import { AnalyzeTextOptions, - CreateAliasOptions, CreateIndexOptions, - CreateOrUpdateAliasOptions, CreateOrUpdateIndexOptions, CreateOrUpdateSynonymMapOptions, CreateSynonymMapOptions, - DeleteAliasOptions, DeleteIndexOptions, DeleteSynonymMapOptions, - GetAliasOptions, GetIndexOptions, GetIndexStatisticsOptions, GetServiceStatisticsOptions, GetSynonymMapsOptions, IndexIterator, IndexNameIterator, - ListAliasesOptions, ListIndexesOptions, ListSynonymMapsOptions, SearchIndex, - SearchIndexAlias, SearchIndexStatistics, SearchServiceStatistics, SynonymMap, -} from "./serviceModels.js"; -import * as utils from "./serviceUtils.js"; -import { createSpan } from "./tracing.js"; +} from "./serviceModels"; +import * as utils from "./serviceUtils"; +import { createSpan } from "./tracing"; /** * Client options used to configure Cognitive Search API requests. @@ -99,11 +89,6 @@ export class SearchIndexClient { */ private readonly client: GeneratedClient; - /** - * A reference to the internal HTTP pipeline for use with raw requests - */ - public readonly pipeline: Pipeline; - /** * Used to authenticate requests to the service. 
*/ @@ -118,10 +103,13 @@ export class SearchIndexClient { * Creates an instance of SearchIndexClient. * * Example usage: - * ```ts snippet:ReadmeSampleSearchIndexClient - * import { SearchIndexClient, AzureKeyCredential } from "@azure/search-documents"; + * ```ts + * const { SearchIndexClient, AzureKeyCredential } = require("@azure/search-documents"); * - * const indexClient = new SearchIndexClient("", new AzureKeyCredential("")); + * const client = new SearchIndexClient( + * "", + * new AzureKeyCredential("") + * ); * ``` * @param endpoint - The endpoint of the search service * @param credential - Used to authenticate requests to the service. @@ -162,7 +150,6 @@ export class SearchIndexClient { this.serviceVersion, internalClientPipelineOptions, ); - this.pipeline = this.client.pipeline; if (isTokenCredential(credential)) { const scope: string = this.options.audience @@ -226,52 +213,6 @@ export class SearchIndexClient { }; } - private async *listAliasesPage( - options: ListAliasesOptions = {}, - ): AsyncIterableIterator<SearchIndexAlias[]> { - const { span, updatedOptions } = createSpan("SearchIndexClient-listAliases", options); - try { - const result = await this.client.aliases.list(updatedOptions); - yield result.aliases; - } catch (e: any) { - span.setStatus({ - status: "error", - error: e.message, - }); - throw e; - } finally { - span.end(); - } - } - - private async *listAliasesAll( - options: ListAliasesOptions = {}, - ): AsyncIterableIterator<SearchIndexAlias> { - for await (const page of this.listAliasesPage(options)) { - yield* page; - } - } - - /** - * Lists all aliases available for a search service. - * @param options - The options parameters. - */ - public listAliases(options: ListAliasesOptions = {}): AliasIterator { - const iter = this.listAliasesAll(options); - - return { - next() { - return iter.next(); - }, - [Symbol.asyncIterator]() { - return this; - }, - byPage: () => { - return this.listAliasesPage(options); - }, - }; - } - private async *listIndexesNamesPage( options: ListIndexesOptions = {}, ): AsyncIterableIterator<string[]> { @@ -306,7 +247,6 @@ export class SearchIndexClient { * Retrieves a list of names of existing indexes in the service. * @param options - Options to the list index operation. */ - // eslint-disable-next-line @azure/azure-sdk/ts-naming-options public listIndexesNames(options: ListIndexesOptions = {}): IndexNameIterator { const iter = this.listIndexesNamesAll(options); @@ -347,7 +287,6 @@ export class SearchIndexClient { * Retrieves a list of names of existing SynonymMaps in the service. * @param options - Options to the list SynonymMaps operation. */ - // eslint-disable-next-line @azure/azure-sdk/ts-naming-options public async listSynonymMapsNames(options: ListSynonymMapsOptions = {}): Promise<Array<string>> { const { span, updatedOptions } = createSpan("SearchIndexClient-listSynonymMapsNames", options); try { @@ -395,7 +334,6 @@ export class SearchIndexClient { */ public async getSynonymMap( synonymMapName: string, - // eslint-disable-next-line @azure/azure-sdk/ts-naming-options options: GetSynonymMapsOptions = {}, ): Promise<SynonymMap> { const { span, updatedOptions } = createSpan("SearchIndexClient-getSynonymMaps", options); try { @@ -600,114 +538,6 @@ export class SearchIndexClient { } } - /** - * Creates a new search alias or updates an alias if it already exists. - * @param alias - The definition of the alias to create or update. - * @param options - The options parameters.
-   */
-  public async createOrUpdateAlias(
-    alias: SearchIndexAlias,
-    options: CreateOrUpdateAliasOptions = {},
-  ): Promise<SearchIndexAlias> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-createOrUpdateAlias", options);
-    try {
-      const etag = options.onlyIfUnchanged ? alias.etag : undefined;
-
-      const result = await this.client.aliases.createOrUpdate(alias.name, alias, {
-        ...updatedOptions,
-        ifMatch: etag,
-      });
-      return result;
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
-  }
-
-  /**
-   * Creates a new search alias.
-   * @param alias - The definition of the alias to create.
-   * @param options - The options parameters.
-   */
-  public async createAlias(
-    alias: SearchIndexAlias,
-    options: CreateAliasOptions = {},
-  ): Promise<SearchIndexAlias> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-createAlias", options);
-    try {
-      const result = await this.client.aliases.create(alias, updatedOptions);
-      return result;
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
-  }
-
-  /**
-   * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no
-   * recovery option. The mapped index is untouched by this operation.
-   * @param alias - Alias object or name of the alias to delete.
-   * @param options - The options parameters.
-   */
-  public async deleteAlias(
-    alias: string | SearchIndexAlias,
-    options: DeleteAliasOptions = {},
-  ): Promise<void> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-deleteAlias", options);
-    try {
-      const aliasName: string = typeof alias === "string" ? alias : alias.name;
-      const etag =
-        typeof alias === "string" ? undefined : options.onlyIfUnchanged ? alias.etag : undefined;
-
-      await this.client.aliases.delete(aliasName, {
-        ...updatedOptions,
-        ifMatch: etag,
-      });
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
-  }
-
-  /**
-   * Retrieves an alias definition.
-   * @param aliasName - The name of the alias to retrieve.
-   * @param options - The options parameters.
-   */
-  public async getAlias(
-    aliasName: string,
-    options: GetAliasOptions = {},
-  ): Promise<SearchIndexAlias> {
-    const { span, updatedOptions } = createSpan("SearchIndexClient-getAlias", options);
-    try {
-      const result = await this.client.aliases.get(aliasName, updatedOptions);
-      return result;
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
-  }
-
   /**
    * Retrieves statistics about an index, such as the count of documents and the size
    * of index storage.
diff --git a/sdk/search/search-documents/src/searchIndexerClient.ts b/sdk/search/search-documents/src/searchIndexerClient.ts
index 319d4eb29f6e..b378dea65105 100644
--- a/sdk/search/search-documents/src/searchIndexerClient.ts
+++ b/sdk/search/search-documents/src/searchIndexerClient.ts
@@ -1,19 +1,17 @@
 // Copyright (c) Microsoft Corporation.
-// Licensed under the MIT License.
+// Licensed under the MIT license.
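For context on the alias surface removed from `searchIndexClient.ts` above, here is a minimal sketch of how those methods were used before this restore. The endpoint, key, and index/alias names are illustrative placeholders, not values from this patch:

```ts
import { SearchIndexClient, AzureKeyCredential } from "@azure/search-documents";

// Hypothetical endpoint and key; the alias methods are the ones deleted above.
const client = new SearchIndexClient("<endpoint>", new AzureKeyCredential("<apiKey>"));

async function swapIndex(): Promise<void> {
  // Point a stable alias at a concrete index so queries can target the alias.
  await client.createAlias({ name: "hotels", indexes: ["hotels-v1"] });

  // Later, re-point the alias. onlyIfUnchanged forwards the alias etag as an
  // If-Match header, so the update fails rather than clobbering a concurrent change.
  const alias = await client.getAlias("hotels");
  alias.indexes = ["hotels-v2"];
  await client.createOrUpdateAlias(alias, { onlyIfUnchanged: true });
}
```

Because an alias maps one name to exactly one index, re-pointing it is how the removed API supported zero-downtime index swaps.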
-import type { KeyCredential, TokenCredential } from "@azure/core-auth";
-import { isTokenCredential } from "@azure/core-auth";
-import type { InternalClientPipelineOptions } from "@azure/core-client";
-import type { ExtendedCommonClientOptions } from "@azure/core-http-compat";
-import type { Pipeline } from "@azure/core-rest-pipeline";
+import { isTokenCredential, KeyCredential, TokenCredential } from "@azure/core-auth";
+import { InternalClientPipelineOptions } from "@azure/core-client";
+import { ExtendedCommonClientOptions } from "@azure/core-http-compat";
 import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline";
-import type { SearchIndexerStatus } from "./generated/service/models/index.js";
-import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient.js";
-import { logger } from "./logger.js";
-import { createOdataMetadataPolicy } from "./odataMetadataPolicy.js";
-import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy.js";
-import { KnownSearchAudience } from "./searchAudience.js";
-import type {
+import { SearchIndexerStatus } from "./generated/service/models";
+import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient";
+import { logger } from "./logger";
+import { createOdataMetadataPolicy } from "./odataMetadataPolicy";
+import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy";
+import { KnownSearchAudience } from "./searchAudience";
+import {
   CreateDataSourceConnectionOptions,
   CreateIndexerOptions,
   CreateorUpdateDataSourceConnectionOptions,
@@ -30,16 +28,14 @@ import type {
   ListDataSourceConnectionsOptions,
   ListIndexersOptions,
   ListSkillsetsOptions,
-  ResetDocumentsOptions,
   ResetIndexerOptions,
-  ResetSkillsOptions,
   RunIndexerOptions,
   SearchIndexer,
   SearchIndexerDataSourceConnection,
   SearchIndexerSkillset,
-} from "./serviceModels.js";
-import * as utils from "./serviceUtils.js";
-import { createSpan } from "./tracing.js";
+} from "./serviceModels";
+import * as utils from "./serviceUtils";
+import { createSpan } from "./tracing";
 
 /**
  * Client options used to configure Cognitive Search API requests.
@@ -92,19 +88,17 @@ export class SearchIndexerClient {
   */
  private readonly client: GeneratedClient;
 
-  /**
-   * A reference to the internal HTTP pipeline for use with raw requests
-   */
-  public readonly pipeline: Pipeline;
-
   /**
    * Creates an instance of SearchIndexerClient.
    *
    * Example usage:
-   * ```ts snippet:ReadmeSampleSearchIndexerClient
-   * import { SearchIndexerClient, AzureKeyCredential } from "@azure/search-documents";
+   * ```ts
+   * const { SearchIndexerClient, AzureKeyCredential } = require("@azure/search-documents");
    *
-   * const indexerClient = new SearchIndexerClient("<endpoint>", new AzureKeyCredential("<apiKey>"));
+   * const client = new SearchIndexerClient(
+   *   "<endpoint>",
+   *   new AzureKeyCredential("<apiKey>")
+   * );
   * ```
   * @param endpoint - The endpoint of the search service
   * @param credential - Used to authenticate requests to the service.
@@ -143,7 +137,6 @@ export class SearchIndexerClient {
      this.serviceVersion,
      internalClientPipelineOptions,
    );
-    this.pipeline = this.client.pipeline;
 
    if (isTokenCredential(credential)) {
      const scope: string = options.audience
@@ -184,7 +177,6 @@ export class SearchIndexerClient {
   * Retrieves a list of names of existing indexers in the service.
   * @param options - Options to the list indexers operation.
   */
-  // eslint-disable-next-line @azure/azure-sdk/ts-naming-options
   public async listIndexersNames(options: ListIndexersOptions = {}): Promise<Array<string>> {
     const { span, updatedOptions } = createSpan("SearchIndexerClient-listIndexersNames", options);
     try {
@@ -234,7 +226,6 @@ export class SearchIndexerClient {
    * @param options - Options to the list indexers operation.
    */
   public async listDataSourceConnectionsNames(
-    // eslint-disable-next-line @azure/azure-sdk/ts-naming-options
     options: ListDataSourceConnectionsOptions = {},
   ): Promise<Array<string>> {
     const { span, updatedOptions } = createSpan(
@@ -284,7 +275,6 @@ export class SearchIndexerClient {
    * Retrieves a list of names of existing Skillsets in the service.
    * @param options - Options to the list Skillsets operation.
    */
-  // eslint-disable-next-line @azure/azure-sdk/ts-naming-options
   public async listSkillsetsNames(options: ListSkillsetsOptions = {}): Promise<Array<string>> {
     const { span, updatedOptions } = createSpan("SearchIndexerClient-listSkillsetsNames", options);
     try {
@@ -743,58 +733,4 @@ export class SearchIndexerClient {
       span.end();
     }
   }
-
-  /**
-   * Resets specific documents in the datasource to be selectively re-ingested by the indexer.
-   * @param indexerName - The name of the indexer to reset documents for.
-   * @param options - Additional optional arguments.
-   */
-  public async resetDocuments(
-    indexerName: string,
-    options: ResetDocumentsOptions = {},
-  ): Promise<void> {
-    const { span, updatedOptions } = createSpan("SearchIndexerClient-resetDocs", options);
-    try {
-      await this.client.indexers.resetDocs(indexerName, {
-        ...updatedOptions,
-        keysOrIds: {
-          documentKeys: updatedOptions.documentKeys,
-          datasourceDocumentIds: updatedOptions.datasourceDocumentIds,
-        },
-      });
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
-  }
-
-  /**
-   * Reset an existing skillset in a search service.
-   * @param skillsetName - The name of the skillset to reset.
-   * @param skillNames - The names of skills to reset.
-   * @param options - The options parameters.
-   */
-  public async resetSkills(skillsetName: string, options: ResetSkillsOptions = {}): Promise<void> {
-    const { span, updatedOptions } = createSpan("SearchIndexerClient-resetSkills", options);
-    try {
-      await this.client.skillsets.resetSkills(
-        skillsetName,
-        { skillNames: options.skillNames },
-        updatedOptions,
-      );
-    } catch (e: any) {
-      span.setStatus({
-        status: "error",
-        error: e.message,
-      });
-      throw e;
-    } finally {
-      span.end();
-    }
-  }
 }
diff --git a/sdk/search/search-documents/src/searchIndexingBufferedSender.ts b/sdk/search/search-documents/src/searchIndexingBufferedSender.ts
index 8f163e76c7c1..200681fabdea 100644
--- a/sdk/search/search-documents/src/searchIndexingBufferedSender.ts
+++ b/sdk/search/search-documents/src/searchIndexingBufferedSender.ts
@@ -1,13 +1,13 @@
 // Copyright (c) Microsoft Corporation.
-// Licensed under the MIT License.
+// Licensed under the MIT license.
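The `resetDocuments` and `resetSkills` methods deleted above were thin wrappers over the preview reset operations. A minimal sketch of the call pattern being removed, with hypothetical indexer, skillset, skill, and key values:

```ts
import { SearchIndexerClient, AzureKeyCredential } from "@azure/search-documents";

const indexerClient = new SearchIndexerClient("<endpoint>", new AzureKeyCredential("<apiKey>"));

async function reprocess(): Promise<void> {
  // Re-ingest two specific documents instead of resetting the whole indexer.
  await indexerClient.resetDocuments("hotel-indexer", {
    documentKeys: ["1", "2"],
  });

  // Re-run only the named skills on the next indexer run.
  await indexerClient.resetSkills("hotel-skillset", { skillNames: ["myOcrSkill"] });
}
```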
-import type { OperationOptions } from "@azure/core-client"; -import type { RestError } from "@azure/core-rest-pipeline"; +import { OperationOptions } from "@azure/core-client"; +import { RestError } from "@azure/core-rest-pipeline"; import { delay } from "@azure/core-util"; -import EventEmitter from "node:events"; -import type { IndexDocumentsResult } from "./generated/data/models/index.js"; -import { IndexDocumentsBatch } from "./indexDocumentsBatch.js"; -import type { +import EventEmitter from "events"; +import { IndexDocumentsResult } from "./generated/data/models"; +import { IndexDocumentsBatch } from "./indexDocumentsBatch"; +import { IndexDocumentsAction, IndexDocumentsOptions, SearchIndexingBufferedSenderDeleteDocumentsOptions, @@ -16,9 +16,9 @@ import type { SearchIndexingBufferedSenderMergeOrUploadDocumentsOptions, SearchIndexingBufferedSenderOptions, SearchIndexingBufferedSenderUploadDocumentsOptions, -} from "./indexModels.js"; -import { getRandomIntegerInclusive } from "./serviceUtils.js"; -import { createSpan } from "./tracing.js"; +} from "./indexModels"; +import { getRandomIntegerInclusive } from "./serviceUtils"; +import { createSpan } from "./tracing"; /** * Index Documents Client diff --git a/sdk/search/search-documents/src/serialization.ts b/sdk/search/search-documents/src/serialization.ts index b445c8d7b99f..cb47dad56a6d 100644 --- a/sdk/search/search-documents/src/serialization.ts +++ b/sdk/search/search-documents/src/serialization.ts @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import GeographyPoint from "./geographyPoint.js"; -import { walk } from "./walk.js"; +import GeographyPoint from "./geographyPoint"; +import { walk } from "./walk"; const ISO8601DateRegex = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,3})?Z$/i; const GeoJSONPointTypeName = "Point"; diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index bc9f052564ad..af471e70e576 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -1,34 +1,26 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. 
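Since the `searchIndexingBufferedSender.ts` hunks above only show import churn, a short usage sketch of the sender itself may help. This is a sketch under assumptions: the index name, document shape, and the key-retriever constructor argument are illustrative, not taken from this patch:

```ts
import {
  AzureKeyCredential,
  SearchClient,
  SearchIndexingBufferedSender,
} from "@azure/search-documents";

interface Hotel {
  hotelId: string;
  hotelName: string;
}

const searchClient = new SearchClient<Hotel>(
  "<endpoint>",
  "<indexName>",
  new AzureKeyCredential("<apiKey>"),
);

async function bufferedUpload(): Promise<void> {
  // The key retriever tells the sender which property identifies a document.
  const sender = new SearchIndexingBufferedSender<Hotel>(searchClient, (doc) => doc.hotelId, {
    autoFlush: true,
  });

  sender.on("batchSucceeded", (response) => {
    console.log(`Indexed a batch of ${response.results.length} document(s)`);
  });

  await sender.uploadDocuments([{ hotelId: "1", hotelName: "Fancy Stay" }]);

  // Flush any buffered actions, then release the timers held by autoFlush.
  await sender.flush();
  await sender.dispose();
}
```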
-import type { OperationOptions } from "@azure/core-client"; -import type { PagedAsyncIterableIterator } from "@azure/core-paging"; -import type { - AIServicesAccountKey, - AIStudioModelCatalogName, +import { OperationOptions } from "@azure/core-client"; +import { PagedAsyncIterableIterator } from "@azure/core-paging"; +import { AsciiFoldingTokenFilter, - AzureMachineLearningSkill, AzureOpenAIModelName, - AzureOpenAITokenizerParameters, BinaryQuantizationCompression, BM25Similarity, CharFilterName, CjkBigramTokenFilter, ClassicSimilarity, ClassicTokenizer, - CognitiveServicesAccount as BaseCognitiveServicesAccount, CognitiveServicesAccountKey, CommonGramTokenFilter, ConditionalSkill, CorsOptions, CustomEntity, - CustomNormalizer, DefaultCognitiveServicesAccount, DictionaryDecompounderTokenFilter, DistanceScoringFunction, DocumentExtractionSkill, - DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, - DocumentIntelligenceLayoutSkillOutputMode, EdgeNGramTokenFilterSide, EdgeNGramTokenizer, ElisionTokenFilter, @@ -71,20 +63,15 @@ import type { LanguageDetectionSkill, LengthTokenFilter, LexicalAnalyzerName, - LexicalNormalizerName, LexicalTokenizerName, LimitTokenFilter, LuceneStandardAnalyzer, MagnitudeScoringFunction, MappingCharFilter, - MarkdownHeaderDepth, - MarkdownParsingSubmode, MergeSkill, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, - NativeBlobSoftDeleteDeletionDetectionPolicy, NGramTokenizer, - OcrLineEnding, PathHierarchyTokenizerV2 as PathHierarchyTokenizer, PatternCaptureTokenFilter, PatternReplaceCharFilter, @@ -92,7 +79,6 @@ import type { PhoneticTokenFilter, ScalarQuantizationCompression, ScoringFunctionAggregation, - SearchAlias, SearchIndexerDataContainer, SearchIndexerDataNoneIdentity, SearchIndexerDataUserAssignedIdentity, @@ -107,7 +93,6 @@ import type { ShingleTokenFilter, SnowballTokenFilter, SoftDeleteColumnDeletionDetectionPolicy, - SplitSkillUnit, SqlIntegratedChangeTrackingPolicy, StemmerOverrideTokenFilter, StemmerTokenFilter, @@ -125,7 +110,7 @@ import type { VectorSearchProfile, VectorSearchVectorizerKind, WordDelimiterTokenFilter, -} from "./generated/service/models/index.js"; +} from "./generated/service/models"; /** * Options for a list skillsets operation. @@ -284,11 +269,6 @@ export type GetAliasOptions = OperationOptions; */ export type ListAliasesOptions = OperationOptions; -/** - * Search Alias object. - */ -export type SearchIndexAlias = SearchAlias; - /** * Options for create synonymmap operation. */ @@ -349,14 +329,6 @@ export interface CreateOrUpdateSkillsetOptions extends OperationOptions { * If set to true, Resource will be updated only if the etag matches. */ onlyIfUnchanged?: boolean; - /** - * Ignores cache reset requirements. - */ - skipIndexerResetRequirementForCache?: boolean; - /** - * Disables cache reprocessing change detection. - */ - disableCacheReprocessingChangeDetection?: boolean; } /** @@ -377,10 +349,6 @@ export interface CreateorUpdateIndexerOptions extends OperationOptions { * If set to true, Resource will be updated only if the etag matches. */ onlyIfUnchanged?: boolean; - /** Ignores cache reset requirements. */ - skipIndexerResetRequirementForCache?: boolean; - /** Disables cache reprocessing change detection. */ - disableCacheReprocessingChangeDetection?: boolean; } /** @@ -391,10 +359,6 @@ export interface CreateorUpdateDataSourceConnectionOptions extends OperationOpti * If set to true, Resource will be updated only if the etag matches. 
*/ onlyIfUnchanged?: boolean; - /** - * Ignores cache reset requirements. - */ - skipIndexerResetRequirementForCache?: boolean; } /** @@ -469,11 +433,6 @@ export interface AnalyzeRequest { * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest. */ tokenizerName?: LexicalTokenizerName; - /** - * The name of the normalizer to use to normalize the given text. {@link KnownNormalizerNames} is - * an enum containing built-in analyzer names. - */ - normalizerName?: LexicalNormalizerName; /** * An optional list of token filters to use when breaking the given text. This parameter can only * be set when using the tokenizer parameter. @@ -627,24 +586,14 @@ export interface WebApiSkill extends BaseSearchIndexerSkill { authIdentity?: SearchIndexerDataIdentity; } -/** Allows you to generate a vector embedding for a given image or text input using the Azure AI Services Vision Vectorize API. */ -export interface VisionVectorizeSkill extends BaseSearchIndexerSkill { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Skills.Vision.VectorizeSkill"; - /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */ - modelVersion?: string; -} - /** * Contains the possible cases for Skill. */ export type SearchIndexerSkill = - | AzureMachineLearningSkill | AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill - | DocumentIntelligenceLayoutSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 @@ -659,37 +608,14 @@ export type SearchIndexerSkill = | ShaperSkill | SplitSkill | TextTranslationSkill - | VisionVectorizeSkill | WebApiSkill; -/** A skill that extracts content and layout information (as markdown), via Azure AI Services, from files within the enrichment pipeline. */ -export interface DocumentIntelligenceLayoutSkill extends BaseSearchIndexerSkill { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill"; - /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */ - outputMode?: DocumentIntelligenceLayoutSkillOutputMode; - /** The depth of headers in the markdown output. Default is h6. */ - markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth; -} - /** * Contains the possible cases for CognitiveServicesAccount. */ export type CognitiveServicesAccount = | DefaultCognitiveServicesAccount - | CognitiveServicesAccountKey - | AIServicesAccountKey - | AIServicesAccountIdentity; - -/** The multi-region account of an Azure AI service resource that's attached to a skillset. */ -export interface AIServicesAccountIdentity extends BaseCognitiveServicesAccount { - /** Polymorphic discriminator, which specifies the different types this object can be */ - odatatype: "#Microsoft.Azure.Search.AIServicesByIdentity"; - /** The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - identity?: SearchIndexerDataIdentity; - /** The subdomain url for the corresponding AI Service. */ - subdomainUrl: string; -} + | CognitiveServicesAccountKey; /** * Tokenizer that uses regex pattern matching to construct distinct tokens. 
This tokenizer is
 * implemented using Apache Lucene.
 */
@@ -904,11 +830,6 @@ export type TokenFilter =
  */
 export type CharFilter = MappingCharFilter | PatternReplaceCharFilter;
 
-/**
- * Contains the possible cases for LexicalNormalizer.
- */
-export type LexicalNormalizer = CustomNormalizer;
-
 /**
  * Contains the possible cases for ScoringFunction.
  */
@@ -966,10 +887,9 @@ export interface SimpleField {
   * returned in a search result. You can disable this option if you don't plan to return the field
   * contents in a search response to save on storage overhead. This can only be set during index
   * creation and only for vector fields. This property cannot be changed for existing fields or set
-   * as false for new fields. If this property is set to `false`, the property `hidden` must be set to
-   * `true`. This property must be true or unset for key fields, for new fields, and for non-vector
-   * fields, and it must be null for complex fields. Disabling this property will reduce index
-   * storage requirements. The default is true for vector fields.
+   * as false for new fields. If this property is set as false, the property 'hidden' must be set to
+   * 'true'. This property must be true or unset for key fields, for new fields, and for non-vector
+   * fields. Disabling this property will reduce index storage requirements.
   */
  stored?: boolean;
  /**
@@ -977,50 +897,50 @@ export interface SimpleField {
   * analysis such as word-breaking during indexing. If you set a searchable field to a value like
   * "sunny day", internally it will be split into the individual tokens "sunny" and "day". This
   * enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String)
By - * default, the service sorts results by score, but in many experiences users will want - * to sort by fields in the documents. A simple field can be sortable only if it is single-valued - * (it has a single value in the scope of the parent document). Simple collection fields cannot - * be sortable, since they are multi-valued. Simple sub-fields of complex collections are also + * default, the search engine sorts results by score, but in many experiences users will want to + * sort by fields in the documents. A simple field can be sortable only if it is single-valued (it + * has a single value in the scope of the parent document). Simple collection fields cannot be + * sortable, since they are multi-valued. Simple sub-fields of complex collections are also * multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent * field, or an ancestor field, that's the complex collection. The default is false. + * */ sortable?: boolean; /** * A value indicating whether to enable the field to be referenced in facet queries. Typically * used in a presentation of search results that includes hit count by category (for example, - * search for digital cameras and see hits by brand, by megapixels, by price, and so on). - * Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. - * Default is false for all other simple fields. + * search for digital cameras and see hits by brand, by megapixels, by price, and so on). Fields + * of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is + * false. */ facetable?: boolean; /** - * The name of the analyzer to use for the field. This option can be used only with - * searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. - * Once the analyzer is chosen, it cannot be changed for the field. + * The name of the analyzer to use for the field. This option can be used only with searchable + * fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the + * analyzer is chosen, it cannot be changed for the field. */ analyzerName?: LexicalAnalyzerName; /** * The name of the analyzer used at search time for the field. This option can be used only with - * searchable fields. It must be set together with `indexAnalyzerName` and it cannot be set together - * with the `analyzerName` option. This property cannot be set to the name of a language + * searchable fields. It must be set together with `indexAnalyzerName` and it cannot be set + * together with the `analyzerName` option. This property cannot be set to the name of a language * analyzer; use the `analyzerName` property instead if you need a language analyzer. This * analyzer can be updated on an existing field. */ @@ -1028,22 +948,18 @@ export interface SimpleField { /** * The name of the analyzer used at indexing time for the field. This option can be used only * with searchable fields. It must be set together with searchAnalyzer and it cannot be set - * together with the `analyzerName` option. Once the analyzer is chosen, it cannot be changed for the - * field. KnownAnalyzerNames is an enum containing known values. + * together with the analyzer option. This property cannot be set to the name of a language + * analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer + * is chosen, it cannot be changed for the field. 
   */
  indexAnalyzerName?: LexicalAnalyzerName;
  /**
   * A list of the names of synonym maps to associate with this field. This option can be used only
   * with searchable fields. Currently only one synonym map per field is supported. Assigning a
-   * synonym map to a field ensures that query terms targeting that field are expanded at
-   * query-time using the rules in the synonym map. This attribute can be changed on existing
-   * fields.
+   * synonym map to a field ensures that query terms targeting that field are expanded at query-time
+   * using the rules in the synonym map. This attribute can be changed on existing fields.
   */
  synonymMapNames?: string[];
-  /**
-   * The name of the normalizer used at indexing time for the field.
-   */
-  normalizerName?: LexicalNormalizerName;
  /**
   * The dimensionality of the vector field.
   */
@@ -1118,20 +1034,15 @@ export interface SynonymMap {
 * as needed during iteration. Use .byPage() to make one request to the server
 * per iteration.
 */
+// eslint-disable-next-line @typescript-eslint/ban-types
 export type IndexIterator = PagedAsyncIterableIterator<SearchIndex, SearchIndex[], {}>;
 
-/**
- * An iterator for listing the aliases that exist in the Search service. This will make requests
- * as needed during iteration. Use .byPage() to make one request to the server
- * per iteration.
- */
-export type AliasIterator = PagedAsyncIterableIterator<SearchIndexAlias, SearchIndexAlias[], {}>;
-
 /**
 * An iterator for listing the indexes that exist in the Search service. Will make requests
 * as needed during iteration. Use .byPage() to make one request to the server
 * per iteration.
 */
+// eslint-disable-next-line @typescript-eslint/ban-types
 export type IndexNameIterator = PagedAsyncIterableIterator<string, string[], {}>;
 
 /**
@@ -1181,10 +1092,6 @@ export interface SearchIndex {
   * The character filters for the index.
   */
  charFilters?: CharFilter[];
-  /**
-   * The normalizers for the index.
-   */
-  normalizers?: LexicalNormalizer[];
  /**
   * A description of an encryption key that you create in Azure Key Vault. This key is used to
   * provide an additional level of encryption-at-rest for your data when you want full assurance
@@ -1295,11 +1202,6 @@ export interface SearchIndexer {
   * paid services created on or after January 1, 2019.
   */
  encryptionKey?: SearchResourceEncryptionKey;
-  /**
-   * Adds caching to an enrichment pipeline to allow for incremental modification steps without
-   * having to rebuild the index every time.
-   */
-  cache?: SearchIndexerCache;
 }
 
 /**
@@ -1443,10 +1345,12 @@ export enum KnownTokenizerNames {
  /**
   * Divides text using language-specific rules.
   */
+  // eslint-disable-next-line @typescript-eslint/no-shadow
  MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
  /**
   * Divides text using language-specific rules and reduces words to their base forms.
   */
+  // eslint-disable-next-line @typescript-eslint/no-shadow
  MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
  /**
   * Tokenizes the input into n-grams of the given size(s). See
@@ -1630,7 +1534,7 @@ export enum KnownTokenFilterNames {
  SoraniNormalization = "sorani_normalization",
  /**
   * Language specific stemming filter. See
-   * https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters
+   * https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters
   */
  Stemmer = "stemmer",
  /**
@@ -1678,7 +1582,7 @@ export enum KnownCharFilterNames {
 /**
 * Defines values for AnalyzerName.
- * See https://learn.microsoft.com/rest/api/searchservice/Language-support + * See https://docs.microsoft.com/rest/api/searchservice/Language-support * @readonly */ export enum KnownAnalyzerNames { @@ -2072,9 +1976,7 @@ export type SearchIndexerDataIdentity = /** * Contains the possible cases for DataDeletionDetectionPolicy. */ -export type DataDeletionDetectionPolicy = - | SoftDeleteColumnDeletionDetectionPolicy - | NativeBlobSoftDeleteDeletionDetectionPolicy; +export type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy; /** * Represents a datasource definition, which can be used to configure an indexer. @@ -2137,14 +2039,11 @@ export interface SearchIndexerDataSourceConnection { export interface VectorSearch { /** Defines combinations of configurations to use with vector search. */ profiles?: VectorSearchProfile[]; - /** Contains configuration options specific to the algorithm used during indexing and/or querying. */ + /** Contains configuration options specific to the algorithm used during indexing or querying. */ algorithms?: VectorSearchAlgorithmConfiguration[]; /** Contains configuration options on how to vectorize text vector queries. */ vectorizers?: VectorSearchVectorizer[]; - /** - * Contains configuration options specific to the compression method used during indexing or - * querying. - */ + /** Contains configuration options specific to the compression method used during indexing or querying. */ compressions?: VectorSearchCompression[]; } @@ -2277,98 +2176,15 @@ export interface WebApiParameters { } /** Contains configuration options on how to vectorize text vector queries. */ -export type VectorSearchVectorizer = - | AIServicesVisionVectorizer - | AzureMachineLearningVectorizer - | AzureOpenAIVectorizer - | WebApiVectorizer; - -/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */ -export interface AIServicesVisionVectorizer extends BaseVectorSearchVectorizer { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "aiServicesVision"; - /** Contains the parameters specific to AI Services Vision embedding vectorization. */ - parameters?: AIServicesVisionParameters; -} - -/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */ -export interface AIServicesVisionParameters { - /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */ - modelVersion?: string; - /** The resource URI of the AI Services resource. */ - resourceUri: string; - /** API key of the designated AI Services resource. */ - apiKey?: string; - /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ - authIdentity?: SearchIndexerDataIdentity; -} - -/** Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog for generating the vector embedding of a query string. */ -export interface AzureMachineLearningVectorizer extends BaseVectorSearchVectorizer { - /** Polymorphic discriminator, which specifies the different types this object can be */ - kind: "aml"; - /** Specifies the properties of the AML vectorizer. 
*/ - amlParameters?: AzureMachineLearningVectorizerParameters; -} - -/** Specifies the properties for connecting to an AML vectorizer. */ -export type AzureMachineLearningVectorizerParameters = - | NoAuthAzureMachineLearningVectorizerParameters - | KeyAuthAzureMachineLearningVectorizerParameters - | TokenAuthAzureMachineLearningVectorizerParameters; - -/** Specifies the properties common between all AML vectorizer auth types. */ -export interface BaseAzureMachineLearningVectorizerParameters { - /** When specified, indicates the timeout for the http client making the API call. */ - timeout?: string; - /** The name of the embedding model from the Azure AI Foundry Catalog that is deployed at the provided endpoint. */ - modelName?: AIStudioModelCatalogName; -} +export type VectorSearchVectorizer = AzureOpenAIVectorizer | WebApiVectorizer; -/** - * Specifies the properties for connecting to an AML vectorizer with no authentication. - */ -export interface NoAuthAzureMachineLearningVectorizerParameters - extends BaseAzureMachineLearningVectorizerParameters { - /** Indicates how the service should attempt to identify itself to the AML instance */ - authKind: "none"; - /** The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */ - scoringUri: string; -} - -/** - * Specifies the properties for connecting to an AML vectorizer with an authentication key. - */ -export interface KeyAuthAzureMachineLearningVectorizerParameters - extends BaseAzureMachineLearningVectorizerParameters { - /** Indicates how the service should attempt to identify itself to the AML instance */ - authKind: "key"; - /** The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */ - scoringUri: string; - /** The key for the AML service. */ - authenticationKey: string; -} - -/** - * Specifies the properties for connecting to an AML vectorizer with a managed identity. - */ -export interface TokenAuthAzureMachineLearningVectorizerParameters - extends BaseAzureMachineLearningVectorizerParameters { - /** Indicates how the service should attempt to identify itself to the AML instance */ - authKind: "token"; - /** The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/\{guid\}/resourceGroups/\{resource-group-name\}/Microsoft.MachineLearningServices/workspaces/\{workspace-name\}/services/\{service_name\}. */ - resourceId: string; - /** The region the AML service is deployed in. */ - region?: string; -} - -/** Specifies the parameters for connecting to the Azure OpenAI resource. */ +/** Contains the parameters specific to using an Azure Open AI service for vectorization at query time. */ export interface AzureOpenAIParameters { - /** The resource URI of the Azure OpenAI resource. */ + /** The resource uri for your Azure Open AI resource. */ resourceUrl?: string; - /** ID of the Azure OpenAI model deployment on the designated resource. */ + /** ID of your Azure Open AI model deployment on the designated resource. */ deploymentId?: string; - /** API key of the designated Azure OpenAI resource. */ + /** API key for the designated Azure Open AI resource. */ apiKey?: string; /** The user-assigned managed identity used for outbound connections. 
*/ authIdentity?: SearchIndexerDataIdentity; @@ -2406,7 +2222,7 @@ export interface IndexingParametersConfiguration { failOnUnsupportedContentType?: boolean; /** For Azure blobs, set to false if you want to continue indexing if a document fails indexing. */ failOnUnprocessableDocument?: boolean; - /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. */ + /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://docs.microsoft.com/azure/search/search-limits-quotas-capacity. */ indexStorageMetadataOnlyForOversizedDocuments?: boolean; /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */ delimitedTextHeaders?: string; @@ -2414,10 +2230,6 @@ export interface IndexingParametersConfiguration { delimitedTextDelimiter?: string; /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */ firstLineContainsHeaders?: boolean; - /** Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. */ - markdownParsingSubmode?: MarkdownParsingSubmode; - /** Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. */ - markdownHeaderDepth?: MarkdownHeaderDepth; /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */ documentRoot?: string; /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */ @@ -2510,12 +2322,10 @@ export interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill { export interface OcrSkill extends BaseSearchIndexerSkill { /** Polymorphic discriminator, which specifies the different types this object can be */ odatatype: "#Microsoft.Skills.Vision.OcrSkill"; - /** A value indicating which language code to use. Default is `en`. */ + /** A value indicating which language code to use. Default is en. */ defaultLanguageCode?: OcrSkillLanguage; /** A value indicating to turn orientation detection on or not. Default is false. */ shouldDetectOrientation?: boolean; - /** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". */ - lineEnding?: OcrLineEnding; } /** Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it. */ @@ -2554,20 +2364,12 @@ export interface SentimentSkill extends BaseSearchIndexerSkill { export interface SplitSkill extends BaseSearchIndexerSkill { /** Polymorphic discriminator, which specifies the different types this object can be */ odatatype: "#Microsoft.Skills.Text.SplitSkill"; - /** A value indicating which language code to use. Default is `en`. */ + /** A value indicating which language code to use. Default is en. 
*/ defaultLanguageCode?: SplitSkillLanguage; /** A value indicating which split mode to perform. */ textSplitMode?: TextSplitMode; /** The desired maximum page length. Default is 10000. */ maxPageLength?: number; - /** Only applicable when textSplitMode is set to 'pages'. If specified, n+1th chunk will start with this number of characters/tokens from the end of the nth chunk. */ - pageOverlapLength?: number; - /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */ - maximumPagesToTake?: number; - /** Only applies if textSplitMode is set to pages. There are two possible values. The choice of the values will decide the length (maximumPageLength and pageOverlapLength) measurement. The default is 'characters', which means the length will be measured by character. */ - unit?: SplitSkillUnit; - /** Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. */ - azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters; } /** A skill to translate text from one language to another. */ @@ -2597,105 +2399,6 @@ export interface ImageAnalysisSkill extends BaseSearchIndexerSkill { /** Contains configuration options specific to the compression method used during indexing or querying. */ export type VectorSearchCompression = BinaryQuantizationCompression | ScalarQuantizationCompression; -/** - * Defines values for LexicalAnalyzerName. - * {@link KnownLexicalAnalyzerName} can be used interchangeably with LexicalAnalyzerName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **ar.microsoft**: Microsoft analyzer for Arabic. - * **ar.lucene**: Lucene analyzer for Arabic. - * **hy.lucene**: Lucene analyzer for Armenian. - * **bn.microsoft**: Microsoft analyzer for Bangla. - * **eu.lucene**: Lucene analyzer for Basque. - * **bg.microsoft**: Microsoft analyzer for Bulgarian. - * **bg.lucene**: Lucene analyzer for Bulgarian. - * **ca.microsoft**: Microsoft analyzer for Catalan. - * **ca.lucene**: Lucene analyzer for Catalan. - * **zh-Hans.microsoft**: Microsoft analyzer for Chinese (Simplified). - * **zh-Hans.lucene**: Lucene analyzer for Chinese (Simplified). - * **zh-Hant.microsoft**: Microsoft analyzer for Chinese (Traditional). - * **zh-Hant.lucene**: Lucene analyzer for Chinese (Traditional). - * **hr.microsoft**: Microsoft analyzer for Croatian. - * **cs.microsoft**: Microsoft analyzer for Czech. - * **cs.lucene**: Lucene analyzer for Czech. - * **da.microsoft**: Microsoft analyzer for Danish. - * **da.lucene**: Lucene analyzer for Danish. - * **nl.microsoft**: Microsoft analyzer for Dutch. - * **nl.lucene**: Lucene analyzer for Dutch. - * **en.microsoft**: Microsoft analyzer for English. - * **en.lucene**: Lucene analyzer for English. - * **et.microsoft**: Microsoft analyzer for Estonian. - * **fi.microsoft**: Microsoft analyzer for Finnish. - * **fi.lucene**: Lucene analyzer for Finnish. - * **fr.microsoft**: Microsoft analyzer for French. - * **fr.lucene**: Lucene analyzer for French. - * **gl.lucene**: Lucene analyzer for Galician. - * **de.microsoft**: Microsoft analyzer for German. - * **de.lucene**: Lucene analyzer for German. 
- * **el.microsoft**: Microsoft analyzer for Greek. - * **el.lucene**: Lucene analyzer for Greek. - * **gu.microsoft**: Microsoft analyzer for Gujarati. - * **he.microsoft**: Microsoft analyzer for Hebrew. - * **hi.microsoft**: Microsoft analyzer for Hindi. - * **hi.lucene**: Lucene analyzer for Hindi. - * **hu.microsoft**: Microsoft analyzer for Hungarian. - * **hu.lucene**: Lucene analyzer for Hungarian. - * **is.microsoft**: Microsoft analyzer for Icelandic. - * **id.microsoft**: Microsoft analyzer for Indonesian (Bahasa). - * **id.lucene**: Lucene analyzer for Indonesian. - * **ga.lucene**: Lucene analyzer for Irish. - * **it.microsoft**: Microsoft analyzer for Italian. - * **it.lucene**: Lucene analyzer for Italian. - * **ja.microsoft**: Microsoft analyzer for Japanese. - * **ja.lucene**: Lucene analyzer for Japanese. - * **kn.microsoft**: Microsoft analyzer for Kannada. - * **ko.microsoft**: Microsoft analyzer for Korean. - * **ko.lucene**: Lucene analyzer for Korean. - * **lv.microsoft**: Microsoft analyzer for Latvian. - * **lv.lucene**: Lucene analyzer for Latvian. - * **lt.microsoft**: Microsoft analyzer for Lithuanian. - * **ml.microsoft**: Microsoft analyzer for Malayalam. - * **ms.microsoft**: Microsoft analyzer for Malay (Latin). - * **mr.microsoft**: Microsoft analyzer for Marathi. - * **nb.microsoft**: Microsoft analyzer for Norwegian (Bokmål). - * **no.lucene**: Lucene analyzer for Norwegian. - * **fa.lucene**: Lucene analyzer for Persian. - * **pl.microsoft**: Microsoft analyzer for Polish. - * **pl.lucene**: Lucene analyzer for Polish. - * **pt-BR.microsoft**: Microsoft analyzer for Portuguese (Brazil). - * **pt-BR.lucene**: Lucene analyzer for Portuguese (Brazil). - * **pt-PT.microsoft**: Microsoft analyzer for Portuguese (Portugal). - * **pt-PT.lucene**: Lucene analyzer for Portuguese (Portugal). - * **pa.microsoft**: Microsoft analyzer for Punjabi. - * **ro.microsoft**: Microsoft analyzer for Romanian. - * **ro.lucene**: Lucene analyzer for Romanian. - * **ru.microsoft**: Microsoft analyzer for Russian. - * **ru.lucene**: Lucene analyzer for Russian. - * **sr-cyrillic.microsoft**: Microsoft analyzer for Serbian (Cyrillic). - * **sr-latin.microsoft**: Microsoft analyzer for Serbian (Latin). - * **sk.microsoft**: Microsoft analyzer for Slovak. - * **sl.microsoft**: Microsoft analyzer for Slovenian. - * **es.microsoft**: Microsoft analyzer for Spanish. - * **es.lucene**: Lucene analyzer for Spanish. - * **sv.microsoft**: Microsoft analyzer for Swedish. - * **sv.lucene**: Lucene analyzer for Swedish. - * **ta.microsoft**: Microsoft analyzer for Tamil. - * **te.microsoft**: Microsoft analyzer for Telugu. - * **th.microsoft**: Microsoft analyzer for Thai. - * **th.lucene**: Lucene analyzer for Thai. - * **tr.microsoft**: Microsoft analyzer for Turkish. - * **tr.lucene**: Lucene analyzer for Turkish. - * **uk.microsoft**: Microsoft analyzer for Ukrainian. - * **ur.microsoft**: Microsoft analyzer for Urdu. - * **vi.microsoft**: Microsoft analyzer for Vietnamese. - * **standard.lucene**: Standard Lucene analyzer. - * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https:\/\/learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers - * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html - * **pattern**: Flexibly separates text into terms via a regular expression pattern. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html - * **simple**: Divides text at non-letters and converts them to lower case. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html - * **stop**: Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html - * **whitespace**: An analyzer that uses the whitespace tokenizer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html - */ export type AnalyzerNames = `${KnownLexicalAnalyzerName}`; export type BlobIndexerDataToExtract = `${KnownBlobIndexerDataToExtract}`; export type BlobIndexerImageAction = `${KnownBlobIndexerImageAction}`; diff --git a/sdk/search/search-documents/src/serviceUtils.ts b/sdk/search/search-documents/src/serviceUtils.ts index 0c71e1fb177d..541b61278975 100644 --- a/sdk/search/search-documents/src/serviceUtils.ts +++ b/sdk/search/search-documents/src/serviceUtils.ts @@ -1,25 +1,20 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import type { +import { SearchResult as GeneratedSearchResult, SuggestDocumentsResult as GeneratedSuggestDocumentsResult, -} from "./generated/data/models/index.js"; -import type { - AIServicesAccountIdentity as GeneratedAIServicesAccountIdentity, - AIServicesAccountKey as GeneratedAIServicesAccountKey, - AIServicesVisionVectorizer as GeneratedAIServicesVisionVectorizer, - AMLParameters as GeneratedAMLParameters, - AMLVectorizer as GeneratedAMLVectorizer, +} from "./generated/data/models"; +import { AzureOpenAIVectorizer as GeneratedAzureOpenAIVectorizer, BM25Similarity, ClassicSimilarity, - CognitiveServicesAccountKey as GeneratedCognitiveServicesAccountKey, + CognitiveServicesAccountKey, CognitiveServicesAccountUnion, CustomAnalyzer as BaseCustomAnalyzer, DataChangeDetectionPolicyUnion, DataDeletionDetectionPolicyUnion, - DefaultCognitiveServicesAccount as GeneratedDefaultCognitiveServicesAccount, + DefaultCognitiveServicesAccount, ExhaustiveKnnAlgorithmConfiguration as GeneratedExhaustiveKnnAlgorithmConfiguration, HighWaterMarkChangeDetectionPolicy, HnswAlgorithmConfiguration as GeneratedHnswAlgorithmConfiguration, @@ -31,12 +26,10 @@ import type { SearchField as GeneratedSearchField, SearchIndex as GeneratedSearchIndex, SearchIndexer as GeneratedSearchIndexer, - SearchIndexerCache as GeneratedSearchIndexerCache, SearchIndexerDataIdentityUnion, SearchIndexerDataNoneIdentity, SearchIndexerDataSource as GeneratedSearchIndexerDataSourceConnection, SearchIndexerDataUserAssignedIdentity, - SearchIndexerKnowledgeStore as BaseSearchIndexerKnowledgeStore, SearchIndexerSkillset as GeneratedSearchIndexerSkillset, SearchIndexerSkillUnion, SearchResourceEncryptionKey as GeneratedSearchResourceEncryptionKey, @@ -49,19 +42,11 @@ import type { VectorSearch as GeneratedVectorSearch, VectorSearchAlgorithmConfigurationUnion as GeneratedVectorSearchAlgorithmConfiguration, VectorSearchVectorizerUnion as GeneratedVectorSearchVectorizer, - WebApiVectorizer as GeneratedWebApiVectorizer, -} from 
"./generated/service/models/index.js"; -import type { - SearchResult, - SelectFields, - SuggestDocumentsResult, - SuggestResult, -} from "./indexModels.js"; -import { logger } from "./logger.js"; -import type { - AIServicesVisionVectorizer, - AzureMachineLearningVectorizer, - AzureMachineLearningVectorizerParameters, + WebApiVectorizer as GeneratedWebAPIVectorizer, +} from "./generated/service/models"; +import { SearchResult, SelectFields, SuggestDocumentsResult, SuggestResult } from "./indexModels"; +import { logger } from "./logger"; +import { AzureOpenAIVectorizer, BlobIndexerDataToExtract, BlobIndexerImageAction, @@ -75,11 +60,9 @@ import type { IndexerExecutionEnvironment, IndexingParameters, IndexingParametersConfiguration, - KeyAuthAzureMachineLearningVectorizerParameters, + isComplexField, LexicalAnalyzer, - LexicalNormalizer, LexicalTokenizer, - NoAuthAzureMachineLearningVectorizerParameters, PatternAnalyzer, RegexFlags, ScoringProfile, @@ -87,29 +70,25 @@ import type { SearchFieldDataType, SearchIndex, SearchIndexer, - SearchIndexerCache, SearchIndexerDataIdentity, SearchIndexerDataSourceConnection, SearchIndexerDataSourceType, SearchIndexerIndexProjection, - SearchIndexerKnowledgeStore, SearchIndexerSkill, SearchIndexerSkillset, SearchResourceEncryptionKey, SimilarityAlgorithm, SimpleField, SynonymMap, - TokenAuthAzureMachineLearningVectorizerParameters, TokenFilter, VectorSearch, VectorSearchAlgorithmConfiguration, VectorSearchAlgorithmMetric, VectorSearchVectorizer, WebApiVectorizer, -} from "./serviceModels.js"; -import { isComplexField } from "./serviceModels.js"; +} from "./serviceModels"; -export const defaultServiceVersion = "2024-09-01-Preview"; +export const defaultServiceVersion = "2024-07-01"; const knownSkills: Record<`${SearchIndexerSkillUnion["odatatype"]}`, true> = { "#Microsoft.Skills.Custom.WebApiSkill": true, @@ -131,9 +110,6 @@ const knownSkills: Record<`${SearchIndexerSkillUnion["odatatype"]}`, true> = { "#Microsoft.Skills.Util.ShaperSkill": true, "#Microsoft.Skills.Vision.ImageAnalysisSkill": true, "#Microsoft.Skills.Vision.OcrSkill": true, - "#Microsoft.Skills.Custom.AmlSkill": true, - "#Microsoft.Skills.Vision.VectorizeSkill": true, - "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill": true, }; export function convertSkillsToPublic(skills: SearchIndexerSkillUnion[]): SearchIndexerSkill[] { @@ -152,19 +128,7 @@ export function convertCognitiveServicesAccountToGenerated( return cognitiveServicesAccount; } - switch (cognitiveServicesAccount.odatatype) { - case "#Microsoft.Azure.Search.AIServicesByIdentity": - case "#Microsoft.Azure.Search.DefaultCognitiveServices": - case "#Microsoft.Azure.Search.CognitiveServicesByKey": - case "#Microsoft.Azure.Search.AIServicesByKey": - return cognitiveServicesAccount; - default: { - logger.warning( - `Unsupported Cognitive Services account odatatype: ${(cognitiveServicesAccount as any).odatatype}`, - ); - return cognitiveServicesAccount as any; - } - } + return cognitiveServicesAccount as CognitiveServicesAccountUnion; } export function convertCognitiveServicesAccountToPublic( @@ -174,37 +138,11 @@ export function convertCognitiveServicesAccountToPublic( return cognitiveServicesAccount; } - const deserializers: Record< - CognitiveServicesAccountUnion["odatatype"], - () => CognitiveServicesAccount - > = { - "#Microsoft.Azure.Search.DefaultCognitiveServices": () => { - return cognitiveServicesAccount as GeneratedDefaultCognitiveServicesAccount; - }, - "#Microsoft.Azure.Search.CognitiveServicesByKey": () => { - return 
cognitiveServicesAccount as GeneratedCognitiveServicesAccountKey; - }, - "#Microsoft.Azure.Search.AIServicesByKey": () => { - return cognitiveServicesAccount as GeneratedAIServicesAccountKey; - }, - "#Microsoft.Azure.Search.AIServicesByIdentity": () => { - const { identity, ...restParams } = - cognitiveServicesAccount as GeneratedAIServicesAccountIdentity; - return { - ...restParams, - identity: convertSearchIndexerDataIdentityToPublic(identity ?? undefined), - }; - }, - }; - - const defaultDeserializer: () => CognitiveServicesAccount = () => { - logger.warning( - `Unsupported Cognitive Services account odatatype: ${(cognitiveServicesAccount as CognitiveServicesAccount).odatatype}`, - ); - return cognitiveServicesAccount as CognitiveServicesAccount; - }; - - return (deserializers[cognitiveServicesAccount.odatatype] ?? defaultDeserializer)(); + if (cognitiveServicesAccount.odatatype === "#Microsoft.Azure.Search.DefaultCognitiveServices") { + return cognitiveServicesAccount as DefaultCognitiveServicesAccount; + } else { + return cognitiveServicesAccount as CognitiveServicesAccountKey; + } } export function convertTokenFiltersToGenerated( @@ -302,8 +240,7 @@ export function convertFieldsToPublic(fields: GeneratedSearchField[]): SearchFie const type: SearchFieldDataType = field.type as SearchFieldDataType; const synonymMapNames: string[] | undefined = field.synonymMaps; - const { retrievable, analyzer, searchAnalyzer, indexAnalyzer, normalizer, ...restField } = - field; + const { retrievable, analyzer, searchAnalyzer, indexAnalyzer, ...restField } = field; const hidden = typeof retrievable === "boolean" ? !retrievable : retrievable; const result: SimpleField = { @@ -313,7 +250,6 @@ export function convertFieldsToPublic(fields: GeneratedSearchField[]): SearchFie analyzerName: analyzer, searchAnalyzerName: searchAnalyzer, indexAnalyzerName: indexAnalyzer, - normalizerName: normalizer, synonymMapNames, }; return result; @@ -321,15 +257,13 @@ export function convertFieldsToPublic(fields: GeneratedSearchField[]): SearchFie }); } -export function convertFieldsToGenerated( - fields: SearchField[] | undefined, -): GeneratedSearchField[] | undefined { - return fields?.map((field) => { +export function convertFieldsToGenerated(fields: SearchField[]): GeneratedSearchField[] { + return fields.map((field) => { if (isComplexField(field)) { return { name: field.name, type: field.type, - fields: convertFieldsToGenerated(field.fields), + fields: field.fields ? 
convertFieldsToGenerated(field.fields) : field.fields, }; } else { const { hidden, ...restField } = field; @@ -346,7 +280,6 @@ export function convertFieldsToGenerated( searchAnalyzer: field.searchAnalyzerName, indexAnalyzer: field.indexAnalyzerName, synonymMaps: field.synonymMapNames, - normalizer: field.normalizerName, }; } }); @@ -431,7 +364,6 @@ function convertEncryptionKeyToPublic( keyName: encryptionKey.keyName, keyVersion: encryptionKey.keyVersion, vaultUrl: encryptionKey.vaultUri, - identity: convertSearchIndexerDataIdentityToPublic(encryptionKey.identity), }; if (encryptionKey.accessCredentials) { @@ -453,7 +385,6 @@ function convertEncryptionKeyToGenerated( keyName: encryptionKey.keyName, keyVersion: encryptionKey.keyVersion, vaultUri: encryptionKey.vaultUrl, - identity: encryptionKey.identity, }; if (encryptionKey.applicationId) { @@ -478,7 +409,6 @@ export function generatedIndexToPublicIndex(generatedIndex: GeneratedSearchIndex tokenizers: convertTokenizersToPublic(generatedIndex.tokenizers), tokenFilters: generatedIndex.tokenFilters as TokenFilter[], charFilters: generatedIndex.charFilters as CharFilter[], - normalizers: generatedIndex.normalizers as LexicalNormalizer[], scoringProfiles: generatedIndex.scoringProfiles as ScoringProfile[], fields: convertFieldsToPublic(generatedIndex.fields), similarity: convertSimilarityToPublic(generatedIndex.similarity), @@ -498,11 +428,8 @@ export function generatedVectorSearchVectorizerToPublicVectorizer( return generatedVectorizer; } - const knownVectorizerDeserializers: Record< - VectorSearchVectorizer["kind"], - () => VectorSearchVectorizer - > = { - azureOpenAI: () => { + switch (generatedVectorizer.kind) { + case "azureOpenAI": { const { parameters } = generatedVectorizer as GeneratedAzureOpenAIVectorizer; const authIdentity = convertSearchIndexerDataIdentityToPublic(parameters?.authIdentity); const vectorizer: AzureOpenAIVectorizer = { @@ -510,90 +437,19 @@ export function generatedVectorSearchVectorizerToPublicVectorizer( parameters: { ...parameters, authIdentity }, }; return vectorizer; - }, - - customWebApi: () => { - const { parameters } = generatedVectorizer as GeneratedWebApiVectorizer; + } + case "customWebApi": { + const { parameters } = generatedVectorizer as GeneratedWebAPIVectorizer; const authIdentity = convertSearchIndexerDataIdentityToPublic(parameters?.authIdentity); const vectorizer: WebApiVectorizer = { - ...(generatedVectorizer as GeneratedWebApiVectorizer), + ...(generatedVectorizer as GeneratedWebAPIVectorizer), parameters: { ...parameters, authIdentity }, }; return vectorizer; - }, - - aiServicesVision: () => { - const generatedVisionVectorizer = generatedVectorizer as GeneratedAIServicesVisionVectorizer; - const { aIServicesVisionParameters: generatedParameters } = generatedVisionVectorizer; - const parameters = generatedParameters - ? { - ...generatedParameters, - modelVersion: generatedParameters.modelVersion ?? 
undefined, - resourceUri: generatedParameters.resourceUri, - authIdentity: convertSearchIndexerDataIdentityToPublic( - generatedParameters.authIdentity, - ), - } - : undefined; - const vectorizer: AIServicesVisionVectorizer = { - ...generatedVisionVectorizer, - parameters, - }; - return vectorizer; - }, - aml: () => { - const generatedAMLVectorizer = generatedVectorizer as GeneratedAMLVectorizer; - - const vectorizer: AzureMachineLearningVectorizer = { - ...generatedAMLVectorizer, - amlParameters: - generatedAzureMachineLearningVectorizerParametersToPublicAzureMachineLearningVectorizerParameters( - generatedAMLVectorizer.aMLParameters, - ), - }; - - return vectorizer; - }, - }; - const defaultDeserializer = (): any => { - logger.warning(`Unsupported vectorizer kind: ${(generatedVectorizer as any).kind}`); - return generatedVectorizer as any; - }; - - return (knownVectorizerDeserializers[generatedVectorizer.kind] ?? defaultDeserializer)(); -} - -function generatedAzureMachineLearningVectorizerParametersToPublicAzureMachineLearningVectorizerParameters( - aMLParameters?: GeneratedAMLParameters, -): AzureMachineLearningVectorizerParameters | undefined { - if (!aMLParameters) { - return aMLParameters; - } - - const { resourceId, authenticationKey, scoringUri } = aMLParameters; - // Sensitive to case order - switch (true) { - case resourceId !== undefined && resourceId !== null: { - return { - ...aMLParameters, - authKind: "token", - } as TokenAuthAzureMachineLearningVectorizerParameters; - } - case authenticationKey !== undefined && authenticationKey !== null: { - return { - ...aMLParameters, - authKind: "key", - } as KeyAuthAzureMachineLearningVectorizerParameters; - } - case scoringUri !== undefined && scoringUri !== null: { - return { - ...aMLParameters, - authKind: "none", - } as NoAuthAzureMachineLearningVectorizerParameters; } } - logger.warning("Unknown AML parameter kind"); - return aMLParameters as any; + logger.warning(`Unsupported vectorizer kind: ${(generatedVectorizer as any).kind}`); + return generatedVectorizer as any; } export function generatedVectorSearchAlgorithmConfigurationToPublicVectorSearchAlgorithmConfiguration(): undefined; @@ -648,7 +504,6 @@ export function generatedSearchResultToPublicSearchResult< _highlights: highlights, _rerankerScore: rerankerScore, _captions: captions, - documentDebugInfo: documentDebugInfo, ...restProps } = result; const obj = { @@ -656,7 +511,6 @@ export function generatedSearchResultToPublicSearchResult< highlights, rerankerScore, captions, - documentDebugInfo, document: restProps, }; return obj as SearchResult; @@ -697,7 +551,7 @@ export function publicIndexToGeneratedIndex(index: SearchIndex): GeneratedSearch tokenFilters: convertTokenFiltersToGenerated(tokenFilters), analyzers: convertAnalyzersToGenerated(analyzers), tokenizers: convertTokenizersToGenerated(tokenizers), - fields: convertFieldsToGenerated(fields) ?? 
[], + fields: convertFieldsToGenerated(fields), similarity: convertSimilarityToGenerated(similarity), }; } @@ -705,19 +559,12 @@ export function publicIndexToGeneratedIndex(index: SearchIndex): GeneratedSearch export function generatedSkillsetToPublicSkillset( generatedSkillset: GeneratedSearchIndexerSkillset, ): SearchIndexerSkillset { - const { - skills, - cognitiveServicesAccount, - knowledgeStore, - encryptionKey, - indexProjection, - ...props - } = generatedSkillset; + const { skills, cognitiveServicesAccount, encryptionKey, indexProjection, ...props } = + generatedSkillset; return { ...props, skills: convertSkillsToPublic(skills), cognitiveServicesAccount: convertCognitiveServicesAccountToPublic(cognitiveServicesAccount), - knowledgeStore: convertKnowledgeStoreToPublic(knowledgeStore), encryptionKey: convertEncryptionKeyToPublic(encryptionKey), indexProjection: indexProjection as SearchIndexerIndexProjection, }; @@ -726,12 +573,17 @@ export function generatedSkillsetToPublicSkillset( export function publicSkillsetToGeneratedSkillset( skillset: SearchIndexerSkillset, ): GeneratedSearchIndexerSkillset { - const { cognitiveServicesAccount, encryptionKey } = skillset; - return { ...skillset, - cognitiveServicesAccount: convertCognitiveServicesAccountToGenerated(cognitiveServicesAccount), - encryptionKey: convertEncryptionKeyToGenerated(encryptionKey), + name: skillset.name, + description: skillset.description, + etag: skillset.etag, + skills: skillset.skills, + cognitiveServicesAccount: convertCognitiveServicesAccountToGenerated( + skillset.cognitiveServicesAccount, + ), + knowledgeStore: skillset.knowledgeStore, + encryptionKey: convertEncryptionKeyToGenerated(skillset.encryptionKey), }; } @@ -804,7 +656,6 @@ export function generatedSearchIndexerToPublicSearchIndexer( ...indexer, parameters, encryptionKey: convertEncryptionKeyToPublic(indexer.encryptionKey), - cache: convertSearchIndexerCacheToPublic(indexer.cache), }; } @@ -819,7 +670,6 @@ export function publicDataSourceToGeneratedDataSource( connectionString: dataSource.connectionString, }, container: dataSource.container, - identity: dataSource.identity, etag: dataSource.etag, dataChangeDetectionPolicy: dataSource.dataChangeDetectionPolicy, dataDeletionDetectionPolicy: dataSource.dataDeletionDetectionPolicy, @@ -836,7 +686,6 @@ export function generatedDataSourceToPublicDataSource( type: dataSource.type as SearchIndexerDataSourceType, connectionString: dataSource.credentials.connectionString, container: dataSource.container, - identity: convertSearchIndexerDataIdentityToPublic(dataSource.identity), etag: dataSource.etag, dataChangeDetectionPolicy: convertDataChangeDetectionPolicyToPublic( dataSource.dataChangeDetectionPolicy, @@ -899,29 +748,3 @@ export function getRandomIntegerInclusive(min: number, max: number): number { const offset = Math.floor(Math.random() * (max - min + 1)); return offset + min; } - -function convertKnowledgeStoreToPublic( - knowledgeStore: BaseSearchIndexerKnowledgeStore | undefined, -): SearchIndexerKnowledgeStore | undefined { - if (!knowledgeStore) { - return knowledgeStore; - } - - return { - ...knowledgeStore, - identity: convertSearchIndexerDataIdentityToPublic(knowledgeStore.identity), - }; -} - -export function convertSearchIndexerCacheToPublic( - cache?: GeneratedSearchIndexerCache, -): SearchIndexerCache | undefined { - if (!cache) { - return cache; - } - - return { - ...cache, - identity: convertSearchIndexerDataIdentityToPublic(cache.identity), - }; -} diff --git 
a/sdk/search/search-documents/src/synonymMapHelper-browser.mts b/sdk/search/search-documents/src/synonymMapHelper-browser.mts index 7fbd358e50d7..e588a2381fd7 100644 --- a/sdk/search/search-documents/src/synonymMapHelper-browser.mts +++ b/sdk/search/search-documents/src/synonymMapHelper-browser.mts @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import type { SynonymMap } from "./serviceModels.js"; +import { SynonymMap } from "./serviceModels.js"; /** * Helper method to create a SynonymMap object. This is a NodeJS only method. diff --git a/sdk/search/search-documents/src/synonymMapHelper.ts b/sdk/search/search-documents/src/synonymMapHelper.ts index 9a10bab8dfb6..6e002f2a7fbc 100644 --- a/sdk/search/search-documents/src/synonymMapHelper.ts +++ b/sdk/search/search-documents/src/synonymMapHelper.ts @@ -1,9 +1,9 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import fs from "node:fs"; -import { promisify } from "node:util"; -import type { SynonymMap } from "./serviceModels.js"; +import * as fs from "fs"; +import { promisify } from "util"; +import { SynonymMap } from "./serviceModels"; const readFileAsync = promisify(fs.readFile); /** diff --git a/sdk/search/search-documents/src/tracing.ts b/sdk/search/search-documents/src/tracing.ts index 6678b4bea44a..38bff4bae880 100644 --- a/sdk/search/search-documents/src/tracing.ts +++ b/sdk/search/search-documents/src/tracing.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. import { createTracingClient } from "@azure/core-tracing"; diff --git a/sdk/search/search-documents/src/walk.ts b/sdk/search/search-documents/src/walk.ts index 2fac66b1aed4..8d1e12b25fd9 100644 --- a/sdk/search/search-documents/src/walk.ts +++ b/sdk/search/search-documents/src/walk.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. import { isDefined } from "@azure/core-util"; diff --git a/sdk/search/search-documents/test/README.md b/sdk/search/search-documents/test/README.md index ea9c3f96c4a9..b9d063c8d05e 100644 --- a/sdk/search/search-documents/test/README.md +++ b/sdk/search/search-documents/test/README.md @@ -2,15 +2,19 @@ To test this project, make sure to build it by following our [building instructions](https://github.com/Azure/azure-sdk-for-js/blob/main/CONTRIBUTING.md#building), then follow the [testing instructions](https://github.com/Azure/azure-sdk-for-js/blob/main/CONTRIBUTING.md#testing). -The Azure Cognitive Search client does not have any recorded tests and so, all the tests require an Azure Cognitive Search account to be set up beforehand. You can use existing Azure resources for the live tests, or generate new ones by using our [New-TestResources.ps1](https://github.com/Azure/azure-sdk-for-js/blob/main/eng/common/TestResources/New-TestResources.ps1) script, which will use a [Bicep template](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/search/test-resources.bicep) that already has all of the the necessary configurations. +The Azure Cognitive Search client does not have any recorded tests and so, all the tests require an Azure Cognitive Search account to be set up beforehand. 
You can use existing Azure resources for the live tests, or generate new ones by using our [New-TestResources.ps1](https://github.com/Azure/azure-sdk-for-js/blob/main/eng/common/TestResources/New-TestResources.ps1) script, which will use an [ARM template](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/search/test-resources.json) that already has all of the necessary configurations.
 
 The Azure resource that is used by the tests in this project is:
 
-- An [Azure Cognitive Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) account.
+- An [Azure Cognitive Search](https://docs.microsoft.com/azure/search/search-what-is-azure-search) account.
 
 To run the live tests, you will also need to set the below environment variables:
 
 - `TEST_MODE`: Should have `live` assigned.
+- `SEARCH_API_ADMIN_KEY`: The primary key of your Azure Search account.
+- `SEARCH_API_ADMIN_KEY_ALT` (optional): The secondary key of your Azure Search account.
 - `ENDPOINT`: The endpoint of your Azure Search account.
 
 The live tests in this project will create, populate and search over search indexes inside of the provided Azure Cognitive Search account.
+
+![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-js%2Fsdk%2Fsearch%2Fsearch-documents%2Ftest%2FREADME.png)
diff --git a/sdk/search/search-documents/test/internal/base64.spec.ts b/sdk/search/search-documents/test/internal/base64.spec.ts
index 252a27b289c0..a5021d7ed23a 100644
--- a/sdk/search/search-documents/test/internal/base64.spec.ts
+++ b/sdk/search/search-documents/test/internal/base64.spec.ts
@@ -1,10 +1,11 @@
 // Copyright (c) Microsoft Corporation.
-// Licensed under the MIT License.
-import { decode, encode } from "../../src/base64.js";
-import { describe, it, assert } from "vitest";
+// Licensed under the MIT license.
 
-describe("base64", () => {
-  it("strings can roundtrip", () => {
+import { assert } from "chai";
+import { decode, encode } from "../../src/base64";
+
+describe("base64", function () {
+  it("strings can roundtrip", function () {
     const message = "Only *you* can prevent null dereferences!";
     const encoded = encode(message);
     const decoded = decode(encoded);
diff --git a/sdk/search/search-documents/test/internal/browser/synonymMap.browser.spec.ts b/sdk/search/search-documents/test/internal/browser/synonymMap.browser.spec.ts
index 3564ba4da121..24581907eec8 100644
--- a/sdk/search/search-documents/test/internal/browser/synonymMap.browser.spec.ts
+++ b/sdk/search/search-documents/test/internal/browser/synonymMap.browser.spec.ts
@@ -1,10 +1,11 @@
 // Copyright (c) Microsoft Corporation.
-// Licensed under the MIT License.
-import { createSynonymMapFromFile } from "../../../src/synonymMapHelper.js";
-import { describe, it, assert } from "vitest";
+// Licensed under the MIT license.
 
-describe("synonymmap", () => { - it("create synonymmap from file(browser)", async () => { +import { assert } from "chai"; +import { createSynonymMapFromFile } from "../../../src/synonymMapHelper.browser"; + +describe("synonymmap", function () { + it("create synonymmap from file(browser)", async function () { let errorThrown = false; try { await createSynonymMapFromFile("my-synonym-map-1", "./test/internal/synonymMap.txt"); diff --git a/sdk/search/search-documents/test/internal/geographyPoint.spec.ts b/sdk/search/search-documents/test/internal/geographyPoint.spec.ts index eee95607cf3f..7c194b88902c 100644 --- a/sdk/search/search-documents/test/internal/geographyPoint.spec.ts +++ b/sdk/search/search-documents/test/internal/geographyPoint.spec.ts @@ -1,11 +1,12 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import GeographyPoint from "../../src/geographyPoint.js"; -import { describe, it, assert } from "vitest"; +import { assert } from "chai"; +import * as sinon from "sinon"; +import GeographyPoint from "../../src/geographyPoint"; -describe("geographyPoint", () => { - it("JSON.stringify", () => { +describe("geographyPoint", function () { + it("JSON.stringify", function () { const geoPoint = new GeographyPoint({ longitude: -122.123889, latitude: 47.669444, @@ -17,4 +18,8 @@ describe("geographyPoint", () => { crs: { type: "name", properties: { name: "EPSG:4326" } }, }); }); + + afterEach(function () { + sinon.restore(); + }); }); diff --git a/sdk/search/search-documents/test/internal/node/synonymMap.node.spec.ts b/sdk/search/search-documents/test/internal/node/synonymMap.node.spec.ts index d0fcfc404b34..ddcefd4c9afe 100644 --- a/sdk/search/search-documents/test/internal/node/synonymMap.node.spec.ts +++ b/sdk/search/search-documents/test/internal/node/synonymMap.node.spec.ts @@ -1,8 +1,9 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. -import { createSynonymMapFromFile } from "../../../src/index.js"; -import type { SynonymMap } from "../../../src/serviceModels.js"; -import { describe, it, assert } from "vitest"; +// Licensed under the MIT license. + +import { assert } from "chai"; +import { createSynonymMapFromFile } from "../../../src"; +import { SynonymMap } from "../../../src/serviceModels"; describe("synonymmap", function () { it("create synonymmap from file(node)", async function () { diff --git a/sdk/search/search-documents/test/internal/serialization.spec.ts b/sdk/search/search-documents/test/internal/serialization.spec.ts index 092c727c75c9..44e0aa9d9e7f 100644 --- a/sdk/search/search-documents/test/internal/serialization.spec.ts +++ b/sdk/search/search-documents/test/internal/serialization.spec.ts @@ -1,25 +1,26 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. 
-import GeographyPoint from "../../src/geographyPoint.js"; -import { deserialize, serialize } from "../../src/serialization.js"; -import { describe, it, assert } from "vitest"; +import { assert } from "chai"; +import * as sinon from "sinon"; +import GeographyPoint from "../../src/geographyPoint"; +import { deserialize, serialize } from "../../src/serialization"; -describe("serialization.serialize", () => { - it("nested", () => { +describe("serialization.serialize", function () { + it("nested", function () { const nestedInput = { a: { b: { c: { d: [42] } } } }; const result = serialize(nestedInput); assert.deepEqual(nestedInput, result); }); - it("circular", () => { + it("circular", function () { const circularInput: any = { a: null }; circularInput.a = circularInput; const result = serialize(circularInput); assert.deepEqual(circularInput, result); }); - it("recursive 1", () => { + it("recursive 1", function () { const child = { hello: "world" }; const documents = [ { id: "1", children: [child] }, @@ -29,7 +30,7 @@ describe("serialization.serialize", () => { assert.deepEqual(documents, result); }); - it("recursive 2", () => { + it("recursive 2", function () { const child = { hello: Infinity, world: -Infinity, universe: NaN }; const expectChild = { hello: "INF", world: "-INF", universe: "NaN" }; const documents = [ @@ -43,22 +44,22 @@ describe("serialization.serialize", () => { assert.deepEqual(result, expect); }); - it("NaN", () => { + it("NaN", function () { const result = serialize({ a: NaN }); assert.deepEqual(result, { a: "NaN" }); }); - it("Infinity", () => { + it("Infinity", function () { const result = serialize({ a: Infinity }); assert.deepEqual(result, { a: "INF" }); }); - it("Negative Infinity", () => { + it("Negative Infinity", function () { const result = serialize({ a: -Infinity }); assert.deepEqual(result, { a: "-INF" }); }); - it("GeographyPoint", () => { + it("GeographyPoint", function () { const result = serialize({ location: new GeographyPoint({ latitude: 37.989769, longitude: -84.527771 }), }); @@ -71,23 +72,27 @@ describe("serialization.serialize", () => { }; assert.deepEqual(result, expect); }); + + afterEach(function () { + sinon.restore(); + }); }); -describe("serialization.deserialize", () => { - it("nested", () => { +describe("serialization.deserialize", function () { + it("nested", function () { const nestedInput = { a: { b: { c: { d: [42] } } } }; const result = deserialize(nestedInput); assert.deepEqual(nestedInput, result); }); - it("circular", () => { + it("circular", function () { const circularInput: any = { a: null }; circularInput.a = circularInput; const result = deserialize(circularInput); assert.deepEqual(circularInput, result); }); - it("recursive 1", () => { + it("recursive 1", function () { const child = { hello: "world" }; const documents = [ { id: "1", children: [child] }, @@ -97,7 +102,7 @@ describe("serialization.deserialize", () => { assert.deepEqual(documents, result); }); - it("recursive 2", () => { + it("recursive 2", function () { const child = { hello: "INF", world: "-INF", universe: "NaN" }; const expectChild = { hello: Infinity, world: -Infinity, universe: NaN }; const documents = [ @@ -111,50 +116,50 @@ describe("serialization.deserialize", () => { assert.deepEqual(result, expect); }); - it("NaN", () => { + it("NaN", function () { const result = deserialize({ a: "NaN" }); assert.deepEqual(result, { a: NaN }); }); - it("Infinity", () => { + it("Infinity", function () { const result = deserialize({ a: "INF" }); assert.deepEqual(result, { a: 
Infinity }); }); - it("Negative Infinity", () => { + it("Negative Infinity", function () { const result = deserialize({ a: "-INF" }); assert.deepEqual(result, { a: -Infinity }); }); - it("Date", () => { + it("Date", function () { const result = deserialize({ a: "1975-04-04T00:00:00.000Z" }); assert.deepEqual(result, { a: new Date(Date.UTC(1975, 3, 4)) }); }); - it("Date with truncated ms field", () => { + it("Date with truncated ms field", function () { const result = deserialize({ a: "1975-04-04T00:00:00.0Z" }); assert.deepEqual(result, { a: new Date(Date.UTC(1975, 3, 4)) }); }); - it("doesn't deserialize as Date if text before", () => { + it("doesn't deserialize as Date if text before", function () { const value = "before 1975-04-04T00:00:00.000Z"; const result = deserialize({ a: value }); assert.deepEqual(result, { a: value }); }); - it("doesn't deserialize as Date if text after", () => { + it("doesn't deserialize as Date if text after", function () { const value = "1975-04-04T00:00:00.000Z after"; const result = deserialize({ a: value }); assert.deepEqual(result, { a: value }); }); - it("doesn't deserialize as Date if text before and after", () => { + it("doesn't deserialize as Date if text before and after", function () { const value = "before 1975-04-04T00:00:00.000Z after"; const result = deserialize({ a: value }); assert.deepEqual(result, { a: value }); }); - it("GeographyPoint", () => { + it("GeographyPoint", function () { const result: { location: GeographyPoint } = deserialize({ location: { type: "Point", @@ -166,4 +171,8 @@ describe("serialization.deserialize", () => { assert.equal(result.location.latitude, 37.989769); assert.equal(result.location.longitude, -84.527771); }); + + afterEach(function () { + sinon.restore(); + }); }); diff --git a/sdk/search/search-documents/test/internal/serviceUtils.spec.ts b/sdk/search/search-documents/test/internal/serviceUtils.spec.ts index 6bab6cd37d84..64e205a37ffc 100644 --- a/sdk/search/search-documents/test/internal/serviceUtils.spec.ts +++ b/sdk/search/search-documents/test/internal/serviceUtils.spec.ts @@ -1,13 +1,14 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. -import type { SearchField as GeneratedSearchField } from "../../src/generated/service/models/index.js"; -import { KnownAnalyzerNames } from "../../src/index.js"; -import type { ComplexField, SearchField } from "../../src/serviceModels.js"; -import { convertFieldsToGenerated, convertFieldsToPublic } from "../../src/serviceUtils.js"; -import { describe, it, assert } from "vitest"; +// Licensed under the MIT license. 
-describe("serviceUtils", () => { - it("convert generated fields to public fields", () => { +import { assert } from "chai"; +import { SearchField as GeneratedSearchField } from "../../src/generated/service/models/index"; +import { KnownAnalyzerNames } from "../../src/index"; +import { ComplexField, SearchField } from "../../src/serviceModels"; +import { convertFieldsToGenerated, convertFieldsToPublic } from "../../src/serviceUtils"; + +describe("serviceUtils", function () { + it("convert generated fields to public fields", function () { const publicFields: SearchField[] = convertFieldsToPublic([ { name: "id", @@ -20,7 +21,6 @@ describe("serviceUtils", () => { retrievable: false, analyzer: KnownAnalyzerNames.ArMicrosoft, indexAnalyzer: KnownAnalyzerNames.ArLucene, - normalizer: KnownAnalyzerNames.BgLucene, searchAnalyzer: KnownAnalyzerNames.CaLucene, synonymMaps: undefined, }, @@ -37,13 +37,12 @@ describe("serviceUtils", () => { hidden: true, analyzerName: KnownAnalyzerNames.ArMicrosoft, indexAnalyzerName: KnownAnalyzerNames.ArLucene, - normalizerName: KnownAnalyzerNames.BgLucene, searchAnalyzerName: KnownAnalyzerNames.CaLucene, synonymMapNames: undefined, }); }); - it("convert generated fields (complex) to public fields", () => { + it("convert generated fields (complex) to public fields", function () { const publicFields: SearchField[] = convertFieldsToPublic([ { name: "ComplexObj", @@ -60,7 +59,6 @@ describe("serviceUtils", () => { retrievable: false, analyzer: KnownAnalyzerNames.ArMicrosoft, indexAnalyzer: KnownAnalyzerNames.ArLucene, - normalizer: KnownAnalyzerNames.BgLucene, searchAnalyzer: KnownAnalyzerNames.CaLucene, synonymMaps: undefined, }, @@ -84,14 +82,13 @@ describe("serviceUtils", () => { hidden: true, analyzerName: KnownAnalyzerNames.ArMicrosoft, indexAnalyzerName: KnownAnalyzerNames.ArLucene, - normalizerName: KnownAnalyzerNames.BgLucene, searchAnalyzerName: KnownAnalyzerNames.CaLucene, synonymMapNames: undefined, }); }); - it("convert public fields to generated fields", () => { - const generatedFields: GeneratedSearchField[] | undefined = convertFieldsToGenerated([ + it("convert public fields to generated fields", function () { + const generatedFields: GeneratedSearchField[] = convertFieldsToGenerated([ { name: "id", key: true, @@ -103,13 +100,12 @@ describe("serviceUtils", () => { hidden: true, analyzerName: KnownAnalyzerNames.ArMicrosoft, indexAnalyzerName: KnownAnalyzerNames.ArLucene, - normalizerName: KnownAnalyzerNames.BgLucene, searchAnalyzerName: KnownAnalyzerNames.CaLucene, synonymMapNames: undefined, }, ]); - assert.include(generatedFields?.[0], { + assert.include(generatedFields[0], { name: "id", key: true, type: "Edm.String", @@ -120,14 +116,13 @@ describe("serviceUtils", () => { retrievable: false, analyzer: KnownAnalyzerNames.ArMicrosoft, indexAnalyzer: KnownAnalyzerNames.ArLucene, - normalizer: KnownAnalyzerNames.BgLucene, searchAnalyzer: KnownAnalyzerNames.CaLucene, synonymMaps: undefined, }); }); - it("convert public fields (complex) to generated fields", () => { - const generatedFields: GeneratedSearchField[] | undefined = convertFieldsToGenerated([ + it("convert public fields (complex) to generated fields", function () { + const generatedFields: GeneratedSearchField[] = convertFieldsToGenerated([ { name: "ComplexObj", type: "Edm.ComplexType", @@ -143,7 +138,6 @@ describe("serviceUtils", () => { hidden: true, analyzerName: KnownAnalyzerNames.ArMicrosoft, indexAnalyzerName: KnownAnalyzerNames.ArLucene, - normalizerName: KnownAnalyzerNames.BgLucene, 
searchAnalyzerName: KnownAnalyzerNames.CaLucene, synonymMapNames: undefined, }, @@ -151,12 +145,12 @@ describe("serviceUtils", () => { }, ]); - assert.include(generatedFields?.[0], { + assert.include(generatedFields[0], { name: "ComplexObj", type: "Edm.ComplexType", }); - assert.include(generatedFields?.[0].fields![0], { + assert.include(generatedFields[0].fields![0], { name: "id", key: true, type: "Edm.String", @@ -167,7 +161,6 @@ describe("serviceUtils", () => { retrievable: false, analyzer: KnownAnalyzerNames.ArMicrosoft, indexAnalyzer: KnownAnalyzerNames.ArLucene, - normalizer: KnownAnalyzerNames.BgLucene, searchAnalyzer: KnownAnalyzerNames.CaLucene, synonymMaps: undefined, }); diff --git a/sdk/search/search-documents/test/narrowedTypes.ts b/sdk/search/search-documents/test/narrowedTypes.ts index 63400f1a79a4..47f2e86a5db2 100644 --- a/sdk/search/search-documents/test/narrowedTypes.ts +++ b/sdk/search/search-documents/test/narrowedTypes.ts @@ -1,19 +1,20 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. /* eslint-disable no-unused-expressions */ +/* eslint-disable no-constant-condition */ /* eslint-disable @typescript-eslint/ban-ts-comment */ /* eslint-disable @typescript-eslint/explicit-function-return-type */ +/* eslint-disable @typescript-eslint/no-unused-vars */ -import type { SelectFields } from "../src/index.js"; -import { SearchClient } from "../src/index.js"; -import type { +import { SearchClient, SelectFields } from "../src/index"; +import { NarrowedModel as GenericNarrowedModel, SearchFieldArray, SearchPick, SelectArray, SuggestNarrowedModel, -} from "../src/indexModels.js"; +} from "../src/indexModels"; type Equals = (() => T extends T1 ? true : false) extends () => T extends T2 ? true : false ? any : never; @@ -55,9 +56,8 @@ function testSelectFields() { const a: Equals, string> = "pass"; const b: Equals, string> = "pass"; const c: Equals, string> = "pass"; - // Should pass as the fields are narrowed to the model fields - // @ts-expect-error const d: Equals, ModelFields> = "pass"; + // SelectFields should be an error, as unknown should be cast // @ts-expect-error const e: Equals, string> = "fail"; @@ -210,8 +210,6 @@ function testSelectArray() { // @ts-expect-error function testSearchFieldArray() { const a: Equals, readonly string[]> = "pass"; - // Should pass as the fields are narrowed to the model fields - // @ts-expect-error const b: Equals, readonly ModelFields[]> = "pass"; const c: Equals, readonly string[]> = "pass"; const d: Equals, readonly string[]> = "pass"; @@ -253,8 +251,6 @@ function testNarrowedClient() { >["queries"] >[number]["fields"] >; - // Should pass as the fields are narrowed to the model fields - // @ts-expect-error const a: Equals = "pass"; return a; }; diff --git a/sdk/search/search-documents/test/public/node/searchClient.spec.ts b/sdk/search/search-documents/test/public/node/searchClient.spec.ts index 718ab5cb247a..70ce9aa4c9eb 100644 --- a/sdk/search/search-documents/test/public/node/searchClient.spec.ts +++ b/sdk/search/search-documents/test/public/node/searchClient.spec.ts @@ -1,32 +1,31 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. 
import { env, isLiveMode, Recorder } from "@azure-tools/test-recorder"; +import { assert } from "chai"; +import { Context, Suite } from "mocha"; + import { delay } from "@azure/core-util"; -import type { OpenAIClient } from "@azure/openai"; -import type { - AutocompleteResult, - SearchFieldArray, - SearchIndex, - SearchIndexClient, - SelectArray, - SelectFields, -} from "../../../src/index.js"; +import { OpenAIClient } from "@azure/openai"; import { + AutocompleteResult, AzureKeyCredential, IndexDocumentsBatch, - KnownQueryLanguage, - KnownQuerySpeller, SearchClient, -} from "../../../src/index.js"; -import { defaultServiceVersion } from "../../../src/serviceUtils.js"; -import type { Hotel } from "../utils/interfaces.js"; -import { createClients } from "../utils/recordedClient.js"; -import { createIndex, createRandomIndexName, populateIndex, WAIT_TIME } from "../utils/setup.js"; -import { describe, it, assert, beforeEach, afterEach } from "vitest"; - -describe("SearchClient", { timeout: 20_000 }, () => { - describe("constructor", () => { + SearchIndex, + SearchIndexClient, + SelectFields, +} from "../../../src"; +import { SearchFieldArray, SelectArray } from "../../../src/indexModels"; +import { defaultServiceVersion } from "../../../src/serviceUtils"; +import { Hotel } from "../utils/interfaces"; +import { createClients } from "../utils/recordedClient"; +import { createIndex, createRandomIndexName, populateIndex, WAIT_TIME } from "../utils/setup"; + +describe("SearchClient", function (this: Suite) { + this.timeout(20_000); + + describe("constructor", function () { const credential = new AzureKeyCredential("key"); describe("Passing serviceVersion", () => { @@ -64,8 +63,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { }); }); - // TODO: the preview-only tests are mixed in here when they should be in another describe (and removed in the stable release branch) - describe("stable", { skip: true }, () => { + describe("stable", function () { let recorder: Recorder; let searchClient: SearchClient; let indexClient: SearchIndexClient; @@ -73,8 +71,8 @@ describe("SearchClient", { timeout: 20_000 }, () => { let TEST_INDEX_NAME: string; let indexDefinition: SearchIndex; - beforeEach(async (ctx) => { - recorder = new Recorder(ctx); + beforeEach(async function (this: Context) { + recorder = new Recorder(this.currentTest); TEST_INDEX_NAME = createRandomIndexName(); ({ searchClient, @@ -87,123 +85,24 @@ describe("SearchClient", { timeout: 20_000 }, () => { await populateIndex(searchClient, openAIClient); }); - afterEach(async () => { + afterEach(async function () { await indexClient.deleteIndex(TEST_INDEX_NAME); await delay(WAIT_TIME); await recorder?.stop(); }); - const baseSemanticOptions = () => - ({ - queryLanguage: KnownQueryLanguage.EnUs, - queryType: "semantic", - semanticSearchOptions: { - configurationName: - indexDefinition.semanticSearch?.configurations?.[0].name ?? 
- assert.fail("No semantic configuration in index."), - }, - }) as const; - - it("search with speller", async () => { - const searchResults = await searchClient.search("budjet", { - skip: 0, - top: 5, - includeTotalCount: true, - queryLanguage: KnownQueryLanguage.EnUs, - speller: KnownQuerySpeller.Lexicon, - }); - assert.equal(searchResults.count, 6); - }); - - it("search with semantic ranking", async () => { - const searchResults = await searchClient.search("luxury", { - ...baseSemanticOptions(), - skip: 0, - top: 5, - includeTotalCount: true, - }); - assert.equal(searchResults.count, 1); - }); - - it("search with document debug info", async () => { - const baseOptions = baseSemanticOptions(); - const options = { - ...baseOptions, - semanticSearchOptions: { - ...baseOptions.semanticSearchOptions, - errorMode: "fail", - debugMode: "semantic", - }, - } as const; - const searchResults = await searchClient.search("luxury", options); - for await (const result of searchResults.results) { - assert.deepEqual( - { - semantic: { - contentFields: [ - { - name: "description", - state: "used", - }, - ], - keywordFields: [ - { - name: "tags", - state: "used", - }, - ], - rerankerInput: { - content: - "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa, and a really helpful concierge. The location is perfect -- right downtown, close to all the tourist attractions. We highly recommend this hotel.", - keywords: "pool\r\nview\r\nwifi\r\nconcierge", - title: "Fancy Stay", - }, - titleField: { - name: "hotelName", - state: "used", - }, - }, - }, - result.documentDebugInfo, - ); - } - }); - - it("search with answers", async () => { - const baseOptions = baseSemanticOptions(); - const options = { - ...baseOptions, - semanticSearchOptions: { - ...baseOptions.semanticSearchOptions, - answers: { answerType: "extractive", count: 3, threshold: 0.7 }, - }, - top: 3, - select: ["hotelId"], - } as const; - const searchResults = await searchClient.search( - "What are the most luxurious hotels?", - options, - ); - - const resultIds = []; - for await (const result of searchResults.results) { - resultIds.push(result.document.hotelId); - } - assert.deepEqual(["1", "9", "3"], resultIds); - }); - - it("count returns the correct document count", async () => { + it("count returns the correct document count", async function () { const documentCount = await searchClient.getDocumentsCount(); assert.equal(documentCount, 10); }); - it("autocomplete returns the correct autocomplete result", async () => { + it("autocomplete returns the correct autocomplete result", async function () { const autoCompleteResult: AutocompleteResult = await searchClient.autocomplete("sec", "sg"); assert.equal(autoCompleteResult.results.length, 1); assert.equal(autoCompleteResult.results[0].text, "secret"); }); - it("autocomplete returns zero results for invalid query", async () => { + it("autocomplete returns zero results for invalid query", async function () { const autoCompleteResult: AutocompleteResult = await searchClient.autocomplete( "garbxyz", "sg", @@ -211,7 +110,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.isTrue(autoCompleteResult.results.length === 0); }); - it("search returns the correct search result", async () => { + it("search returns the correct search result", async function () { const searchResults = await searchClient.search("budget", { skip: 0, top: 5, @@ -221,10 +120,9 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(searchResults.count, 6); }); - 
it("search narrows the result type", async () => { - // This part of the test is only for types. This doesn't need to be called. - // eslint-disable-next-line no-unused-expressions - async () => { + it("search narrows the result type", async function () { + // eslint-disable-next-line no-constant-condition + if (false) { const response = await searchClient.search("asdf", { select: ["address/city"], }); @@ -233,7 +131,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { // @ts-expect-error result.document.category = ""; } - }; + } const hotelKeys: (keyof Hotel)[] = [ "address", @@ -338,7 +236,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { await Promise.all(searchFieldsTestPromises); }); - it("search returns zero results for invalid query", async () => { + it("search returns zero results for invalid query", async function () { const searchResults = await searchClient.search("garbxyz", { skip: 0, top: 5, @@ -347,7 +245,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(searchResults.count, 0); }); - it("suggest returns the correct suggestions", async () => { + it("suggest returns the correct suggestions", async function () { const suggestResult = await searchClient.suggest("WiFi", "sg"); assert.equal(suggestResult.results.length, 1); assert.isTrue( @@ -355,12 +253,12 @@ describe("SearchClient", { timeout: 20_000 }, () => { ); }); - it("suggest returns zero suggestions for invalid input", async () => { + it("suggest returns zero suggestions for invalid input", async function () { const suggestResult = await searchClient.suggest("garbxyz", "sg"); assert.equal(suggestResult.results.length, 0); }); - it("getDocument returns the correct document result", async () => { + it("getDocument returns the correct document result", async function () { const getDocumentResult = await searchClient.getDocument("8"); assert.equal( getDocumentResult.description, @@ -373,7 +271,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(getDocumentResult.hotelId, "8"); }); - it("getDocument throws error for invalid getDocument Value", async () => { + it("getDocument throws error for invalid getDocument Value", async function () { let errorThrown = false; try { await searchClient.getDocument("garbxyz"); @@ -383,7 +281,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.isTrue(errorThrown, "Expected getDocument to fail with an exception"); }); - it("deleteDocuments delete a document by documents", async () => { + it("deleteDocuments delete a document by documents", async function () { const getDocumentResult = await searchClient.getDocument("8"); await searchClient.deleteDocuments([getDocumentResult]); await delay(WAIT_TIME); @@ -391,14 +289,14 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(documentCount, 9); }); - it("deleteDocuments delete a document by key/keyNames", async () => { + it("deleteDocuments delete a document by key/keyNames", async function () { await searchClient.deleteDocuments("hotelId", ["9", "10"]); await delay(WAIT_TIME); const documentCount = await searchClient.getDocumentsCount(); assert.equal(documentCount, 8); }); - it("mergeOrUploadDocuments modify & merge an existing document", async () => { + it("mergeOrUploadDocuments modify & merge an existing document", async function () { let getDocumentResult = await searchClient.getDocument("6"); getDocumentResult.description = "Modified Description"; await searchClient.mergeOrUploadDocuments([getDocumentResult]); @@ -407,7 +305,7 @@ describe("SearchClient", { 
timeout: 20_000 }, () => { assert.equal(getDocumentResult.description, "Modified Description"); }); - it("mergeOrUploadDocuments merge a new document", async () => { + it("mergeOrUploadDocuments merge a new document", async function () { const document = { hotelId: "11", description: "New Hotel Description", @@ -419,7 +317,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(documentCount, 11); }); - it("mergeDocuments modify & merge an existing document", async () => { + it("mergeDocuments modify & merge an existing document", async function () { let getDocumentResult = await searchClient.getDocument("6"); getDocumentResult.description = "Modified Description"; await searchClient.mergeDocuments([getDocumentResult]); @@ -428,7 +326,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(getDocumentResult.description, "Modified Description"); }); - it("uploadDocuments upload a set of documents", async () => { + it("uploadDocuments upload a set of documents", async function () { const documents = [ { hotelId: "11", @@ -447,7 +345,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(documentCount, 12); }); - it("indexDocuments upload a new document", async () => { + it("indexDocuments upload a new document", async function () { const batch: IndexDocumentsBatch = new IndexDocumentsBatch(); batch.upload([ { @@ -462,7 +360,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(documentCount, 11); }); - it("indexDocuments deletes existing documents", async () => { + it("indexDocuments deletes existing documents", async function () { const batch: IndexDocumentsBatch = new IndexDocumentsBatch(); batch.delete([ { @@ -479,7 +377,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(documentCount, 8); }); - it("indexDocuments merges an existing document", async () => { + it("indexDocuments merges an existing document", async function () { const batch: IndexDocumentsBatch = new IndexDocumentsBatch(); batch.merge([ { @@ -494,7 +392,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(getDocumentResult.description, "Modified Description"); }); - it("indexDocuments merge/upload documents", async () => { + it("indexDocuments merge/upload documents", async function () { const batch: IndexDocumentsBatch = new IndexDocumentsBatch(); batch.mergeOrUpload([ { @@ -516,9 +414,50 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(documentCount, 11); }); - it("search with semantic error handling", async () => { + it("search with semantic ranking", async function () { const searchResults = await searchClient.search("luxury", { - ...baseSemanticOptions(), + skip: 0, + top: 5, + includeTotalCount: true, + queryType: "semantic", + semanticSearchOptions: { + configurationName: + indexDefinition.semanticSearch?.configurations?.[0].name ?? + assert.fail("No semantic configuration in index."), + }, + }); + assert.equal(searchResults.count, 1); + }); + + it("search with answers", async function () { + const searchResults = await searchClient.search("What are the most luxurious hotels?", { + queryType: "semantic", + semanticSearchOptions: { + configurationName: + indexDefinition.semanticSearch?.configurations?.[0].name ?? 
+ assert.fail("No semantic configuration in index."), + answers: { answerType: "extractive", count: 3, threshold: 0.7 }, + }, + top: 3, + select: ["hotelId"], + }); + + const resultIds = []; + for await (const result of searchResults.results) { + resultIds.push(result.document.hotelId); + } + assert.deepEqual(["1", "9", "3"], resultIds); + }); + + it("search with semantic error handling", async function () { + const searchResults = await searchClient.search("luxury", { + queryType: "semantic", + semanticSearchOptions: { + configurationName: + indexDefinition.semanticSearch?.configurations?.[0].name ?? + assert.fail("No semantic configuration in index."), + errorMode: "partial", + }, select: ["hotelId"], }); @@ -529,10 +468,10 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.deepEqual(["1"], resultIds); }); - it("search with vector", async (ctx) => { + it("search with vector", async function () { // This live test is disabled due to temporary limitations with the new OpenAI service if (isLiveMode()) { - ctx.skip(); + this.skip(); } const embeddings = await openAIClient.getEmbeddings( env.AZURE_OPENAI_DEPLOYMENT_NAME ?? "deployment-name", @@ -563,10 +502,10 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.deepEqual(resultIds, ["1", "3", "4"]); }); - it("multi-vector search", async (ctx) => { + it("multi-vector search", async function () { // This live test is disabled due to temporary limitations with the new OpenAI service if (isLiveMode()) { - ctx.skip(); + this.skip(); } const embeddings = await openAIClient.getEmbeddings( env.AZURE_OPENAI_DEPLOYMENT_NAME ?? "deployment-name", @@ -603,10 +542,10 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.deepEqual(resultIds, ["1", "3", "4"]); }); - it("oversampling compressed vectors", async (ctx) => { + it("oversampling compressed vectors", async function () { // This live test is disabled due to temporary limitations with the new OpenAI service if (isLiveMode()) { - ctx.skip(); + this.skip(); } const embeddings = await openAIClient.getEmbeddings( diff --git a/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts b/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts index b3d34cfc1c5e..0f01c72fe061 100644 --- a/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts +++ b/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts @@ -1,30 +1,34 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. 
import { env, isLiveMode, Recorder } from "@azure-tools/test-recorder"; import { delay } from "@azure/core-util"; -import type { +import { assert } from "chai"; +import { Context, Suite } from "mocha"; +import { + AzureKeyCredential, AzureOpenAIVectorizer, SearchIndex, + SearchIndexClient, SynonymMap, VectorSearchAlgorithmConfiguration, VectorSearchProfile, -} from "../../../src/index.js"; -import { AzureKeyCredential, SearchIndexClient } from "../../../src/index.js"; -import { defaultServiceVersion } from "../../../src/serviceUtils.js"; -import type { Hotel } from "../utils/interfaces.js"; -import { createClients } from "../utils/recordedClient.js"; +} from "../../../src"; +import { defaultServiceVersion } from "../../../src/serviceUtils"; +import { Hotel } from "../utils/interfaces"; +import { createClients } from "../utils/recordedClient"; import { createRandomIndexName, createSimpleIndex, createSynonymMaps, deleteSynonymMaps, WAIT_TIME, -} from "../utils/setup.js"; -import { describe, it, assert, beforeEach, afterEach } from "vitest"; +} from "../utils/setup"; + +describe("SearchIndexClient", function (this: Suite) { + this.timeout(20_000); -describe("SearchIndexClient", { timeout: 20_000 }, () => { - describe("constructor", () => { + describe("constructor", function () { const credential = new AzureKeyCredential("key"); describe("Passing serviceVersion", () => { @@ -62,13 +66,13 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { }); }); - describe("stable", { skip: true }, () => { + describe("stable", function () { let recorder: Recorder; let indexClient: SearchIndexClient; let TEST_INDEX_NAME: string; - beforeEach(async (ctx) => { - recorder = new Recorder(ctx); + beforeEach(async function (this: Context) { + recorder = new Recorder(this.currentTest); TEST_INDEX_NAME = createRandomIndexName(); ({ indexClient, indexName: TEST_INDEX_NAME } = await createClients( defaultServiceVersion, @@ -81,20 +85,20 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { await delay(WAIT_TIME); }); - afterEach(async () => { + afterEach(async function () { await indexClient.deleteIndex(TEST_INDEX_NAME); await delay(WAIT_TIME); await deleteSynonymMaps(indexClient); await recorder?.stop(); }); - describe("#synonymmaps", () => { - it("gets the list of synonymmaps", async () => { + describe("#synonymmaps", function () { + it("gets the list of synonymmaps", async function () { const synonymMaps = await indexClient.listSynonymMaps(); assert.isAtLeast(synonymMaps.length, 2); }); - it("gets the list of synonymmaps names", async () => { + it("gets the list of synonymmaps names", async function () { const synonymMapNames = await indexClient.listSynonymMapsNames(); assert.isAtLeast(synonymMapNames.length, 2); for (let i = 1; i <= 2; i++) { @@ -102,7 +106,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { } }); - it("gets the correct synonymmap object", async () => { + it("gets the correct synonymmap object", async function () { const synonymMap = await indexClient.getSynonymMap("my-azure-synonymmap-1"); assert.equal(synonymMap.name, "my-azure-synonymmap-1"); assert.equal(synonymMap.synonyms.length, 2); @@ -114,7 +118,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { assert.include(synonyms, synonymMap.synonyms[1]); }); - it("throws error for invalid synonymmap object", async () => { + it("throws error for invalid synonymmap object", async function () { let retrievalError: boolean = false; try { await indexClient.getSynonymMap("garbxyz"); @@ -124,7 +128,7 @@ 
describe("SearchIndexClient", { timeout: 20_000 }, () => { assert.isTrue(retrievalError); }); - it("creates the synonymmap object using createOrUpdateSynonymMap", async () => { + it("creates the synonymmap object using createOrUpdateSynonymMap", async function () { let synonymMap: SynonymMap = { name: `my-azure-synonymmap-3`, synonyms: ["United States, United States of America => USA", "Washington, Wash. => WA"], @@ -145,7 +149,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { } }); - it("modify and updates the synonymmap object", async () => { + it("modify and updates the synonymmap object", async function () { let synonymMap = await indexClient.getSynonymMap("my-azure-synonymmap-1"); synonymMap.synonyms.push("California, Clif. => CA"); await indexClient.createOrUpdateSynonymMap(synonymMap); @@ -162,8 +166,8 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { }); }); - describe("#indexes", () => { - it("gets the list of indexes", async () => { + describe("#indexes", function () { + it("gets the list of indexes", async function () { const result = await indexClient.listIndexes(); let listOfIndexes = await result.next(); const indexNames: string[] = []; @@ -174,7 +178,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { assert.include(indexNames, TEST_INDEX_NAME); }); - it("gets the list of indexes names", async () => { + it("gets the list of indexes names", async function () { const result = await indexClient.listIndexesNames(); let listOfIndexNames = await result.next(); const indexNames: string[] = []; @@ -185,13 +189,13 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { assert.include(indexNames, TEST_INDEX_NAME); }); - it("gets the correct index object", async () => { + it("gets the correct index object", async function () { const index = await indexClient.getIndex(TEST_INDEX_NAME); assert.equal(index.name, TEST_INDEX_NAME); assert.equal(index.fields.length, 5); }); - it("throws error for invalid index object", async () => { + it("throws error for invalid index object", async function () { let retrievalError: boolean = false; try { await indexClient.getIndex("garbxyz"); @@ -201,7 +205,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { assert.isTrue(retrievalError); }); - it("creates the index object using createOrUpdateIndex", async () => { + it("creates the index object using createOrUpdateIndex", async function () { const indexName: string = isLiveMode() ? createRandomIndexName() : "hotel-live-test4"; let index: SearchIndex = { name: indexName, @@ -251,7 +255,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { } }); - it("modify and updates the index object", async () => { + it("modify and updates the index object", async function () { let index = await indexClient.getIndex(TEST_INDEX_NAME); index.fields.push({ type: "Edm.DateTimeOffset", @@ -264,7 +268,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { }); }); - it("creates the index object vector fields", async () => { + it("creates the index object vector fields", async function () { const indexName: string = isLiveMode() ? 
createRandomIndexName() : "hotel-live-test4"; const algorithm: VectorSearchAlgorithmConfiguration = { diff --git a/sdk/search/search-documents/test/public/odata.spec.ts b/sdk/search/search-documents/test/public/odata.spec.ts index d2545cea382c..e939de63fcba 100644 --- a/sdk/search/search-documents/test/public/odata.spec.ts +++ b/sdk/search/search-documents/test/public/odata.spec.ts @@ -1,7 +1,9 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. -import { odata } from "../../src/index.js"; -import { describe, it, assert } from "vitest"; +// Licensed under the MIT license. + +import { assert } from "chai"; +import * as sinon from "sinon"; +import { odata } from "../../src"; describe("odata", function () { it("simple string isn't changed", function () { @@ -79,4 +81,8 @@ describe("odata", function () { assert.strictEqual(odata`Foo eq ${"bar's"}`, "Foo eq 'bar''s'"); assert.strictEqual(odata`Foo eq ${'"bar"'}`, "Foo eq '\"bar\"'"); }); + + afterEach(function () { + sinon.restore(); + }); }); diff --git a/sdk/search/search-documents/test/public/typeDefinitions.ts b/sdk/search/search-documents/test/public/typeDefinitions.ts index 7bb7816d0255..3afd59df59d6 100644 --- a/sdk/search/search-documents/test/public/typeDefinitions.ts +++ b/sdk/search/search-documents/test/public/typeDefinitions.ts @@ -1,14 +1,16 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. -import type { +/* eslint-disable @typescript-eslint/no-unused-vars */ + +import { KnownSemanticErrorMode, KnownSemanticErrorReason, KnownSemanticSearchResultsType, KnownVectorFilterMode, KnownVectorQueryKind, -} from "../../src/generated/data/index.js"; -import type { +} from "../../src/generated/data"; +import { KnownBlobIndexerDataToExtract, KnownBlobIndexerImageAction, KnownBlobIndexerParsingMode, @@ -32,7 +34,7 @@ import type { KnownVectorSearchAlgorithmKind, KnownVectorSearchAlgorithmMetric, KnownVisualFeature, -} from "../../src/generated/service/index.js"; +} from "../../src/generated/service"; import type { IsEqual } from "type-plus"; @@ -79,7 +81,6 @@ type BlobIndexerParsingMode = | "json" | "jsonArray" | "jsonLines" - | "markdown" | "text"; type BlobIndexerPDFTextRotationAlgorithm = "detectAngles" | "none"; type CustomEntityLookupSkillLanguage = "da" | "de" | "en" | "es" | "fi" | "fr" | "it" | "ko" | "pt"; @@ -395,8 +396,7 @@ type SearchIndexerDataSourceType = | "azuresql" | "azuretable" | "cosmosdb" - | "mysql" - | "onelake"; + | "mysql"; type SemanticErrorMode = "fail" | "partial"; type SemanticErrorReason = "capacityOverloaded" | "maxWaitExceeded" | "transient"; type SemanticSearchResultsType = "baseResults" | "rerankedResults"; @@ -525,7 +525,7 @@ type TextTranslationSkillLanguage = | "zh-Hans" | "zh-Hant"; type VectorFilterMode = "postFilter" | "preFilter"; -type VectorQueryKind = "imageBinary" | "imageUrl" | "text" | "vector"; +type VectorQueryKind = "text" | "vector"; type VectorSearchAlgorithmKind = "exhaustiveKnn" | "hnsw"; type VectorSearchAlgorithmMetric = "cosine" | "dotProduct" | "euclidean" | "hamming"; type VisualFeature = diff --git a/sdk/search/search-documents/test/public/utils/interfaces.ts b/sdk/search/search-documents/test/public/utils/interfaces.ts index 4b18b3bcd707..cbf59ad1d666 100644 --- a/sdk/search/search-documents/test/public/utils/interfaces.ts +++ b/sdk/search/search-documents/test/public/utils/interfaces.ts @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
+// Licensed under the MIT license. -import type { GeographyPoint } from "../../../src/index.js"; +import { GeographyPoint } from "../../../src"; export interface Hotel { hotelId: string; diff --git a/sdk/search/search-documents/test/public/utils/recordedClient.ts b/sdk/search/search-documents/test/public/utils/recordedClient.ts index 0132e8edc71b..e843f1b005dd 100644 --- a/sdk/search/search-documents/test/public/utils/recordedClient.ts +++ b/sdk/search/search-documents/test/public/utils/recordedClient.ts @@ -1,12 +1,17 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. import { createTestCredential } from "@azure-tools/test-credential"; -import type { Recorder, RecorderStartOptions, SanitizerOptions } from "@azure-tools/test-recorder"; -import { assertEnvironmentVariable, env } from "@azure-tools/test-recorder"; +import { + assertEnvironmentVariable, + env, + Recorder, + RecorderStartOptions, + SanitizerOptions, +} from "@azure-tools/test-recorder"; import { isDefined } from "@azure/core-util"; import { OpenAIClient } from "@azure/openai"; -import { SearchClient, SearchIndexClient, SearchIndexerClient } from "../../../src/index.js"; +import { SearchClient, SearchIndexClient, SearchIndexerClient } from "../../../src"; export interface Clients { searchClient: SearchClient; @@ -49,7 +54,6 @@ function createRecorderStartOptions(): RecorderStartOptions { }; return { envSetupForPlayback, - removeCentralSanitizers: ["AZSDK2021", "AZSDK3493"], sanitizerOptions: { generalSanitizers, bodyKeySanitizers: [bodyKeySanitizer], diff --git a/sdk/search/search-documents/test/public/utils/setup.ts b/sdk/search/search-documents/test/public/utils/setup.ts index fbe9553aa5ee..6680ae288d51 100644 --- a/sdk/search/search-documents/test/public/utils/setup.ts +++ b/sdk/search/search-documents/test/public/utils/setup.ts @@ -1,10 +1,13 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. +// Licensed under the MIT license. import { assertEnvironmentVariable, isLiveMode, isPlaybackMode } from "@azure-tools/test-recorder"; import { computeSha256Hash, delay, isDefined } from "@azure/core-util"; -import type { OpenAIClient } from "@azure/openai"; -import type { +import { OpenAIClient } from "@azure/openai"; +import { assert } from "chai"; +import { + GeographyPoint, + KnownAnalyzerNames, SearchClient, SearchField, SearchIndex, @@ -12,15 +15,13 @@ import type { SearchIndexerClient, VectorSearchAlgorithmConfiguration, VectorSearchCompression, - VectorSearchProfile, VectorSearchVectorizer, -} from "../../../src/index.js"; -import { GeographyPoint, KnownAnalyzerNames } from "../../../src/index.js"; -import type { Hotel } from "./interfaces.js"; -import { assert } from "vitest"; +} from "../../../src"; +import { Hotel } from "./interfaces"; export const WAIT_TIME = isPlaybackMode() ? 
0 : 4000; +// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function createIndex( client: SearchIndexClient, name: string, @@ -39,7 +40,7 @@ export async function createIndex( }, }, ]; - await Promise.all(vectorizers.map((v) => renameUniquelyInPlace("vectorizerName", v))); + await Promise.all(vectorizers.map(renameUniquelyInPlace("vectorizerName"))); const [azureOpenAiVectorizerName] = vectorizers.map((v) => v.vectorizerName); const algorithmConfigurations: VectorSearchAlgorithmConfiguration[] = [ @@ -54,7 +55,7 @@ export async function createIndex( parameters: { metric: "euclidean" }, }, ]; - await Promise.all(algorithmConfigurations.map((c) => renameUniquelyInPlace("name", c))); + await Promise.all(algorithmConfigurations.map(renameUniquelyInPlace("name"))); const [hnswAlgorithmConfigurationName, exhaustiveKnnAlgorithmConfigurationName] = algorithmConfigurations.map((c) => c.name); @@ -66,27 +67,27 @@ export async function createIndex( rerankWithOriginalVectors: true, }, ]; - await Promise.all( - compressionConfigurations.map((c) => renameUniquelyInPlace("compressionName", c)), - ); + await Promise.all(compressionConfigurations.map(renameUniquelyInPlace("compressionName"))); const [scalarQuantizationCompressionConfigurationName] = compressionConfigurations.map( (c) => c.compressionName, ); - const vectorSearchProfiles: VectorSearchProfile[] = [ + const vectorSearchProfiles = [ { name: "vector-search-profile", - vectorizerName: isPreview ? azureOpenAiVectorizerName : undefined, + vectorizer: isPreview ? azureOpenAiVectorizerName : undefined, algorithmConfigurationName: exhaustiveKnnAlgorithmConfigurationName, }, { name: "vector-search-profile", - vectorizerName: isPreview ? azureOpenAiVectorizerName : undefined, + vectorizer: isPreview ? azureOpenAiVectorizerName : undefined, algorithmConfigurationName: hnswAlgorithmConfigurationName, - compressionName: isPreview ? scalarQuantizationCompressionConfigurationName : undefined, + compressionConfigurationName: isPreview + ? 
scalarQuantizationCompressionConfigurationName + : undefined, }, ]; - await Promise.all(vectorSearchProfiles.map((p) => renameUniquelyInPlace("name", p))); + await Promise.all(vectorSearchProfiles.map(renameUniquelyInPlace("name"))); const [azureOpenAiVectorSearchProfileName, azureOpenAiCompressedVectorSearchProfileName] = vectorSearchProfiles.map((p) => p.name); @@ -343,6 +344,7 @@ export async function createIndex( return client.createIndex(hotelIndex); } +// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function populateIndex( client: SearchClient, openAIClient: OpenAIClient, @@ -583,12 +585,14 @@ async function addVectorDescriptions( }); } +// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function deleteDataSourceConnections(client: SearchIndexerClient): Promise { for (let i = 1; i <= 2; i++) { await client.deleteDataSourceConnection(`my-data-source-${i}`); } } +// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function createSkillsets(client: SearchIndexerClient): Promise { const testCaseNames: string[] = ["my-azureblob-skillset-1", "my-azureblob-skillset-2"]; const skillSetNames: string[] = await client.listSkillsetsNames(); @@ -640,12 +644,14 @@ export async function createSkillsets(client: SearchIndexerClient): Promise { for (let i = 1; i <= 2; i++) { await client.deleteSkillset(`my-azureblob-skillset-${i}`); } } +// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function createIndexers( client: SearchIndexerClient, targetIndexName: string, @@ -762,11 +768,12 @@ export function createRandomIndexName(): string { return `hotel-live-test-${Math.floor(Math.random() * 100000) + 1000000}`; } -async function renameUniquelyInPlace( +function renameUniquelyInPlace( prop: T, - obj: Record, -): Promise { - const hash = await computeSha256Hash(JSON.stringify(obj), "hex"); - const name = [obj[prop], hash.toLowerCase()].join("-"); - obj[prop] = name; +): (obj: Record) => Promise { + return async (obj) => { + const hash = await computeSha256Hash(JSON.stringify(obj), "hex"); + const name = [obj[prop], hash.toLowerCase()].join("-"); + obj[prop] = name; + }; } diff --git a/sdk/search/search-documents/test/snippets.spec.ts b/sdk/search/search-documents/test/snippets.spec.ts deleted file mode 100644 index c162d0af5b6b..000000000000 --- a/sdk/search/search-documents/test/snippets.spec.ts +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -import { describe, it } from "vitest"; -import { - AzureKeyCredential, - KnownSearchAudience, - odata, - SearchClient, - SearchFieldArray, - SearchIndexClient, - SearchIndexerClient, - SelectFields, -} from "../src/index.js"; -import { setLogLevel } from "@azure/logger"; - -describe("snippets", () => { - it("ReadmeSampleCreateClient_APIKey", async () => { - // To query and manipulate documents - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - // @ts-preserve-whitespace - // To manage indexes and synonymmaps - const indexClient = new SearchIndexClient("", new AzureKeyCredential("")); - // @ts-preserve-whitespace - // To manage indexers, datasources and skillsets - const indexerClient = new SearchIndexerClient("", new AzureKeyCredential("")); - }); - - it("ReadmeSampleCreateClient_NationalCloud", async () => { - // To query and manipulate documents - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - { - audience: KnownSearchAudience.AzureChina, - }, - ); - // @ts-preserve-whitespace - // To manage indexes and synonymmaps - const indexClient = new SearchIndexClient("", new AzureKeyCredential(""), { - audience: KnownSearchAudience.AzureChina, - }); - // @ts-preserve-whitespace - // To manage indexers, datasources and skillsets - const indexerClient = new SearchIndexerClient( - "", - new AzureKeyCredential(""), - { - audience: KnownSearchAudience.AzureChina, - }, - ); - }); - - it("ReadmeSampleCreateIndex", async () => { - const indexClient = new SearchIndexClient("", new AzureKeyCredential("")); - // @ts-preserve-whitespace - const result = await indexClient.createIndex({ - name: "example-index", - fields: [ - { - type: "Edm.String", - name: "id", - key: true, - }, - { - type: "Edm.Double", - name: "awesomenessLevel", - sortable: true, - filterable: true, - facetable: true, - }, - { - type: "Edm.String", - name: "description", - searchable: true, - }, - { - type: "Edm.ComplexType", - name: "details", - fields: [ - { - type: "Collection(Edm.String)", - name: "tags", - searchable: true, - }, - ], - }, - { - type: "Edm.Int32", - name: "hiddenWeight", - hidden: true, - }, - ], - }); - // @ts-preserve-whitespace - console.log(`Index created with name ${result.name}`); - }); - - it("ReadmeSampleGetDocument", async () => { - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - // @ts-preserve-whitespace - const result = await searchClient.getDocument("1234"); - }); - - it("ReadmeSampleUploadDocuments", async () => { - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - // @ts-preserve-whitespace - const uploadResult = await searchClient.uploadDocuments([ - // JSON objects matching the shape of the client's index - {}, - {}, - {}, - ]); - for (const result of uploadResult.results) { - console.log(`Uploaded ${result.key}; succeeded? 
${result.succeeded}`); - } - }); - - it("ReadmeSampleSearch", async () => { - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - // @ts-preserve-whitespace - const searchResults = await searchClient.search("wifi -luxury"); - for await (const result of searchResults.results) { - console.log(result); - } - }); - - it("ReadmeSampleSearchLucene", async () => { - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - // @ts-preserve-whitespace - const searchResults = await searchClient.search('Category:budget AND "recently renovated"^3', { - queryType: "full", - searchMode: "all", - }); - for await (const result of searchResults.results) { - console.log(result); - } - }); - - it("ReadmeSampleSearchWithTypes", async () => { - // An example schema for documents in the index - interface Hotel { - hotelId?: string; - hotelName?: string | null; - description?: string | null; - descriptionVector?: Array; - parkingIncluded?: boolean | null; - lastRenovationDate?: Date | null; - rating?: number | null; - rooms?: Array<{ - beds?: number | null; - description?: string | null; - }>; - } - // @ts-preserve-whitespace - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - // @ts-preserve-whitespace - const searchResults = await searchClient.search("wifi -luxury", { - // Only fields in Hotel can be added to this array. - // TS will complain if one is misspelled. - select: ["hotelId", "hotelName", "rooms/beds"], - }); - // @ts-preserve-whitespace - // These are other ways to declare the correct type for `select`. - const select = ["hotelId", "hotelName", "rooms/beds"] as const; - // This declaration lets you opt out of narrowing the TypeScript type of your documents, - // though the AI Search service will still only return these fields. - const selectWide: SelectFields[] = ["hotelId", "hotelName", "rooms/beds"]; - // This is an invalid declaration. Passing this to `select` will result in a compiler error - // unless you opt out of including the model in the client constructor. - const selectInvalid = ["hotelId", "hotelName", "rooms/beds"]; - // @ts-preserve-whitespace - for await (const result of searchResults.results) { - // result.document has hotelId, hotelName, and rating. - // Trying to access result.document.description would emit a TS error. - console.log(result.document.hotelName); - } - }); - - it("ReadmeSampleSearchWithOData", async () => { - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - // @ts-preserve-whitespace - const baseRateMax = 200; - const ratingMin = 4; - const searchResults = await searchClient.search("WiFi", { - filter: odata`Rooms/any(room: room/BaseRate lt ${baseRateMax}) and Rating ge ${ratingMin}`, - orderBy: ["Rating desc"], - select: ["hotelId", "hotelName", "Rating"], - }); - for await (const result of searchResults.results) { - // Each result will have "HotelId", "HotelName", and "Rating" - // in addition to the standard search result property "score" - console.log(result); - } - }); - - it("ReadmeSampleSearchWithVector", async () => { - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - // @ts-preserve-whitespace - const queryVector: number[] = [ - // Embedding of the query "What are the most luxurious hotels?" 
- ]; - const searchResults = await searchClient.search("*", { - vectorSearchOptions: { - queries: [ - { - kind: "vector", - vector: queryVector, - fields: ["descriptionVector"], - kNearestNeighborsCount: 3, - }, - ], - }, - }); - for await (const result of searchResults.results) { - // These results are the nearest neighbors to the query vector - console.log(result); - } - }); - - it("ReadmeSampleSearchWithFacets", async () => { - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - // @ts-preserve-whitespace - const searchResults = await searchClient.search("WiFi", { - facets: ["category,count:3,sort:count", "rooms/baseRate,interval:100"], - }); - console.log(searchResults.facets); - // Output will look like: - // { - // 'rooms/baseRate': [ - // { count: 16, value: 0 }, - // { count: 17, value: 100 }, - // { count: 17, value: 200 } - // ], - // category: [ - // { count: 5, value: 'Budget' }, - // { count: 5, value: 'Luxury' }, - // { count: 5, value: 'Resort and Spa' } - // ] - // } - }); - - it("ReadmeSampleOdataUsage", async () => { - const baseRateMax = 200; - const ratingMin = 4; - const filter = odata`Rooms/any(room: room/BaseRate lt ${baseRateMax}) and Rating ge ${ratingMin}`; - }); - - it("ReadmeSampleSearchClient", async () => { - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - }); - - it("ReadmeSampleSearchClientWithModel", async () => { - type TModel = { - keyName: string; - field1?: string | null; - field2?: { anotherField?: string | null } | null; - }; - // @ts-preserve-whitespace - const searchClient = new SearchClient( - "", - "", - new AzureKeyCredential(""), - ); - }); - - it("ReadmeSampleAutocomplete", async () => { - type TModel = { - key: string; - azure?: { sdk: string | null } | null; - }; - // @ts-preserve-whitespace - const client = new SearchClient( - "endpoint.azure", - "indexName", - new AzureKeyCredential("key"), - ); - // @ts-preserve-whitespace - const searchFields: SearchFieldArray = ["azure/sdk"]; - // @ts-preserve-whitespace - const autocompleteResult = await client.autocomplete("searchText", "suggesterName", { - searchFields, - }); - }); - - it("ReadmeSampleSearchTModel", async () => { - type TModel = { - key: string; - azure?: { sdk: string | null } | null; - }; - // @ts-preserve-whitespace - const client = new SearchClient( - "endpoint.azure", - "indexName", - new AzureKeyCredential("key"), - ); - // @ts-preserve-whitespace - const select = ["azure/sdk"] as const; - const searchFields: SearchFieldArray = ["azure/sdk"]; - // @ts-preserve-whitespace - const searchResult = await client.search("searchText", { - select, - searchFields, - }); - }); - - it("ReadmeSampleSuggest", async () => { - type TModel = { - key: string; - azure?: { sdk: string | null } | null; - }; - // @ts-preserve-whitespace - const client = new SearchClient( - "endpoint.azure", - "indexName", - new AzureKeyCredential("key"), - ); - // @ts-preserve-whitespace - const select = ["azure/sdk"] as const; - const searchFields: SearchFieldArray = ["azure/sdk"]; - // @ts-preserve-whitespace - const suggestResult = await client.suggest("searchText", "suggesterName", { - select, - searchFields, - }); - }); - - it("ReadmeSampleSearchIndexClient", async () => { - const indexClient = new SearchIndexClient("", new AzureKeyCredential("")); - }); - - it("ReadmeSampleSearchIndexerClient", async () => { - const indexerClient = new SearchIndexerClient("", new AzureKeyCredential("")); - }); - - it("SetLogLevel", () => { - 
setLogLevel("info"); - }); -}); From 5a9d4d22deec5f6765f82d3a5ce608951c088004 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Tue, 30 Sep 2025 15:57:02 +0000 Subject: [PATCH 02/21] chore(search-documents): add .js extensions to relative imports --- sdk/search/search-documents/src/index.ts | 26 +++++++++---------- .../src/indexDocumentsBatch.ts | 2 +- .../search-documents/src/indexModels.ts | 4 +-- .../search-documents/src/searchClient.ts | 26 +++++++++---------- .../search-documents/src/searchIndexClient.ts | 20 +++++++------- .../src/searchIndexerClient.ts | 18 ++++++------- .../src/searchIndexingBufferedSender.ts | 10 +++---- .../search-documents/src/serialization.ts | 4 +-- .../search-documents/src/serviceModels.ts | 2 +- .../search-documents/src/serviceUtils.ts | 10 +++---- .../search-documents/src/synonymMapHelper.ts | 2 +- 11 files changed, 62 insertions(+), 62 deletions(-) diff --git a/sdk/search/search-documents/src/index.ts b/sdk/search/search-documents/src/index.ts index 175ecfa37700..69460acedcbf 100644 --- a/sdk/search/search-documents/src/index.ts +++ b/sdk/search/search-documents/src/index.ts @@ -20,7 +20,7 @@ export { QueryType, ScoringStatistics, SearchMode, -} from "./generated/data/models"; +} from "./generated/data/models/index.js"; export { AnalyzedTokenInfo, AnalyzeResult, @@ -185,9 +185,9 @@ export { VectorSearchProfile, VectorSearchVectorizerKind, WordDelimiterTokenFilter, -} from "./generated/service/models"; -export { default as GeographyPoint } from "./geographyPoint"; -export { IndexDocumentsBatch } from "./indexDocumentsBatch"; +} from "./generated/service/models/index.js"; +export { default as GeographyPoint } from "./geographyPoint.js"; +export { IndexDocumentsBatch } from "./indexDocumentsBatch.js"; export { AutocompleteOptions, AutocompleteRequest, @@ -243,19 +243,19 @@ export { VectorQuery, VectorQueryKind, VectorSearchOptions, -} from "./indexModels"; -export { odata } from "./odata"; -export { KnownSearchAudience } from "./searchAudience"; -export { SearchClient, SearchClientOptions } from "./searchClient"; -export { SearchIndexClient, SearchIndexClientOptions } from "./searchIndexClient"; -export { SearchIndexerClient, SearchIndexerClientOptions } from "./searchIndexerClient"; +} from "./indexModels.js"; +export { odata } from "./odata.js"; +export { KnownSearchAudience } from "./searchAudience.js"; +export { SearchClient, SearchClientOptions } from "./searchClient.js"; +export { SearchIndexClient, SearchIndexClientOptions } from "./searchIndexClient.js"; +export { SearchIndexerClient, SearchIndexerClientOptions } from "./searchIndexerClient.js"; export { DEFAULT_BATCH_SIZE, DEFAULT_FLUSH_WINDOW, DEFAULT_RETRY_COUNT, IndexDocumentsClient, SearchIndexingBufferedSender, -} from "./searchIndexingBufferedSender"; +} from "./searchIndexingBufferedSender.js"; export { AnalyzeRequest, AnalyzeTextOptions, @@ -377,5 +377,5 @@ export { WebApiParameters, WebApiSkill, WebApiVectorizer, -} from "./serviceModels"; -export { createSynonymMapFromFile } from "./synonymMapHelper"; +} from "./serviceModels.js"; +export { createSynonymMapFromFile } from "./synonymMapHelper.js"; diff --git a/sdk/search/search-documents/src/indexDocumentsBatch.ts b/sdk/search/search-documents/src/indexDocumentsBatch.ts index 1122943bb701..c7a917e91f8e 100644 --- a/sdk/search/search-documents/src/indexDocumentsBatch.ts +++ b/sdk/search/search-documents/src/indexDocumentsBatch.ts @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
-import { IndexDocumentsAction } from "./indexModels"; +import { IndexDocumentsAction } from "./indexModels.js"; /** * Class used to perform batch operations diff --git a/sdk/search/search-documents/src/indexModels.ts b/sdk/search/search-documents/src/indexModels.ts index 529aa660011e..ab0066a4b1eb 100644 --- a/sdk/search/search-documents/src/indexModels.ts +++ b/sdk/search/search-documents/src/indexModels.ts @@ -17,8 +17,8 @@ import { QueryType, ScoringStatistics, SearchMode, -} from "./generated/data/models"; -import GeographyPoint from "./geographyPoint"; +} from "./generated/data/models/index.js"; +import GeographyPoint from "./geographyPoint.js"; /** * Options for performing the count operation on the index. diff --git a/sdk/search/search-documents/src/searchClient.ts b/sdk/search/search-documents/src/searchClient.ts index f3a1e90697ab..687695b12d6a 100644 --- a/sdk/search/search-documents/src/searchClient.ts +++ b/sdk/search/search-documents/src/searchClient.ts @@ -7,7 +7,7 @@ import { isTokenCredential, KeyCredential, TokenCredential } from "@azure/core-a import { InternalClientPipelineOptions } from "@azure/core-client"; import { ExtendedCommonClientOptions } from "@azure/core-http-compat"; import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline"; -import { decode, encode } from "./base64"; +import { decode, encode } from "./base64.js"; import { AutocompleteRequest, AutocompleteResult, @@ -17,9 +17,9 @@ import { SearchRequest as GeneratedSearchRequest, SuggestRequest, VectorQueryUnion as GeneratedVectorQuery, -} from "./generated/data/models"; -import { SearchClient as GeneratedClient } from "./generated/data/searchClient"; -import { IndexDocumentsBatch } from "./indexDocumentsBatch"; +} from "./generated/data/models/index.js"; +import { SearchClient as GeneratedClient } from "./generated/data/searchClient.js"; +import { IndexDocumentsBatch } from "./indexDocumentsBatch.js"; import { AutocompleteOptions, CountDocumentsOptions, @@ -46,15 +46,15 @@ import { SuggestOptions, UploadDocumentsOptions, VectorQuery, -} from "./indexModels"; -import { logger } from "./logger"; -import { createOdataMetadataPolicy } from "./odataMetadataPolicy"; -import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy"; -import { KnownSearchAudience } from "./searchAudience"; -import { IndexDocumentsClient } from "./searchIndexingBufferedSender"; -import { deserialize, serialize } from "./serialization"; -import * as utils from "./serviceUtils"; -import { createSpan } from "./tracing"; +} from "./indexModels.js"; +import { logger } from "./logger.js"; +import { createOdataMetadataPolicy } from "./odataMetadataPolicy.js"; +import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy.js"; +import { KnownSearchAudience } from "./searchAudience.js"; +import { IndexDocumentsClient } from "./searchIndexingBufferedSender.js"; +import { deserialize, serialize } from "./serialization.js"; +import * as utils from "./serviceUtils.js"; +import { createSpan } from "./tracing.js"; /** * Client options used to configure Cognitive Search API requests. 
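The mechanical rewrite in this commit follows from TypeScript's `Node16`/`NodeNext` module resolution: Node's ESM loader resolves relative specifiers literally, without probing for extensions, so every relative import must name the `.js` file that `tsc` will emit, even though the file on disk is `.ts`. A minimal sketch of the rule, reusing this package's `odata` helper (the surrounding file and the `tsconfig` values are illustrative, not part of the patch):

```ts
// Assumes tsconfig.json sets "module": "NodeNext" and
// "moduleResolution": "NodeNext", as ESM-ready Azure SDK packages do.

// The specifier names the emitted file, so ".js" is required even though
// the source on disk is ./odata.ts:
import { odata } from "./odata.js";

// The extensionless form still type-checks under the legacy "node"
// resolution, but Node's ESM loader rejects it at runtime with
// ERR_MODULE_NOT_FOUND:
// import { odata } from "./odata";

const ratingMin = 4;
// The tagged template quotes string interpolations and leaves numbers bare:
console.log(odata`Rating ge ${ratingMin}`); // -> "Rating ge 4"
```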
diff --git a/sdk/search/search-documents/src/searchIndexClient.ts b/sdk/search/search-documents/src/searchIndexClient.ts index 2b8a26c1bf82..41c4c1827d40 100644 --- a/sdk/search/search-documents/src/searchIndexClient.ts +++ b/sdk/search/search-documents/src/searchIndexClient.ts @@ -7,13 +7,13 @@ import { isTokenCredential, KeyCredential, TokenCredential } from "@azure/core-a import { InternalClientPipelineOptions } from "@azure/core-client"; import { ExtendedCommonClientOptions } from "@azure/core-http-compat"; import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline"; -import { AnalyzeResult } from "./generated/service/models"; -import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient"; -import { logger } from "./logger"; -import { createOdataMetadataPolicy } from "./odataMetadataPolicy"; -import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy"; -import { KnownSearchAudience } from "./searchAudience"; -import { SearchClient, SearchClientOptions as GetSearchClientOptions } from "./searchClient"; +import { AnalyzeResult } from "./generated/service/models/index.js"; +import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient.js"; +import { logger } from "./logger.js"; +import { createOdataMetadataPolicy } from "./odataMetadataPolicy.js"; +import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy.js"; +import { KnownSearchAudience } from "./searchAudience.js"; +import { SearchClient, SearchClientOptions as GetSearchClientOptions } from "./searchClient.js"; import { AnalyzeTextOptions, CreateIndexOptions, @@ -34,9 +34,9 @@ import { SearchIndexStatistics, SearchServiceStatistics, SynonymMap, -} from "./serviceModels"; -import * as utils from "./serviceUtils"; -import { createSpan } from "./tracing"; +} from "./serviceModels.js"; +import * as utils from "./serviceUtils.js"; +import { createSpan } from "./tracing.js"; /** * Client options used to configure Cognitive Search API requests. 
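Note that none of these internal path rewrites change the public surface: `src/index.ts` re-exports the clients, so application code keeps using the bare package specifier, which is resolved through the published package's entry points rather than by the relative-path rule above. A consumer-side sketch (endpoint and key are placeholders):

```ts
import {
  AzureKeyCredential,
  SearchIndexClient,
  SearchIndexerClient,
} from "@azure/search-documents";

// Bare specifiers are unaffected by the ".js" extension requirement;
// only relative imports inside the package need it.
const indexClient = new SearchIndexClient("<endpoint>", new AzureKeyCredential("<apiKey>"));
const indexerClient = new SearchIndexerClient("<endpoint>", new AzureKeyCredential("<apiKey>"));
```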
diff --git a/sdk/search/search-documents/src/searchIndexerClient.ts b/sdk/search/search-documents/src/searchIndexerClient.ts index b378dea65105..f63abbccc7ce 100644 --- a/sdk/search/search-documents/src/searchIndexerClient.ts +++ b/sdk/search/search-documents/src/searchIndexerClient.ts @@ -5,12 +5,12 @@ import { isTokenCredential, KeyCredential, TokenCredential } from "@azure/core-a import { InternalClientPipelineOptions } from "@azure/core-client"; import { ExtendedCommonClientOptions } from "@azure/core-http-compat"; import { bearerTokenAuthenticationPolicy } from "@azure/core-rest-pipeline"; -import { SearchIndexerStatus } from "./generated/service/models"; -import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient"; -import { logger } from "./logger"; -import { createOdataMetadataPolicy } from "./odataMetadataPolicy"; -import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy"; -import { KnownSearchAudience } from "./searchAudience"; +import { SearchIndexerStatus } from "./generated/service/models/index.js"; +import { SearchServiceClient as GeneratedClient } from "./generated/service/searchServiceClient.js"; +import { logger } from "./logger.js"; +import { createOdataMetadataPolicy } from "./odataMetadataPolicy.js"; +import { createSearchApiKeyCredentialPolicy } from "./searchApiKeyCredentialPolicy.js"; +import { KnownSearchAudience } from "./searchAudience.js"; import { CreateDataSourceConnectionOptions, CreateIndexerOptions, @@ -33,9 +33,9 @@ import { SearchIndexer, SearchIndexerDataSourceConnection, SearchIndexerSkillset, -} from "./serviceModels"; -import * as utils from "./serviceUtils"; -import { createSpan } from "./tracing"; +} from "./serviceModels.js"; +import * as utils from "./serviceUtils.js"; +import { createSpan } from "./tracing.js"; /** * Client options used to configure Cognitive Search API requests. diff --git a/sdk/search/search-documents/src/searchIndexingBufferedSender.ts b/sdk/search/search-documents/src/searchIndexingBufferedSender.ts index 200681fabdea..4e1c6a21437e 100644 --- a/sdk/search/search-documents/src/searchIndexingBufferedSender.ts +++ b/sdk/search/search-documents/src/searchIndexingBufferedSender.ts @@ -5,8 +5,8 @@ import { OperationOptions } from "@azure/core-client"; import { RestError } from "@azure/core-rest-pipeline"; import { delay } from "@azure/core-util"; import EventEmitter from "events"; -import { IndexDocumentsResult } from "./generated/data/models"; -import { IndexDocumentsBatch } from "./indexDocumentsBatch"; +import { IndexDocumentsResult } from "./generated/data/models/index.js"; +import { IndexDocumentsBatch } from "./indexDocumentsBatch.js"; import { IndexDocumentsAction, IndexDocumentsOptions, @@ -16,9 +16,9 @@ import { SearchIndexingBufferedSenderMergeOrUploadDocumentsOptions, SearchIndexingBufferedSenderOptions, SearchIndexingBufferedSenderUploadDocumentsOptions, -} from "./indexModels"; -import { getRandomIntegerInclusive } from "./serviceUtils"; -import { createSpan } from "./tracing"; +} from "./indexModels.js"; +import { getRandomIntegerInclusive } from "./serviceUtils.js"; +import { createSpan } from "./tracing.js"; /** * Index Documents Client diff --git a/sdk/search/search-documents/src/serialization.ts b/sdk/search/search-documents/src/serialization.ts index cb47dad56a6d..d8016fc1ecc7 100644 --- a/sdk/search/search-documents/src/serialization.ts +++ b/sdk/search/search-documents/src/serialization.ts @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. 
// Licensed under the MIT license. -import GeographyPoint from "./geographyPoint"; -import { walk } from "./walk"; +import GeographyPoint from "./geographyPoint.js"; +import { walk } from "./walk.js"; const ISO8601DateRegex = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,3})?Z$/i; const GeoJSONPointTypeName = "Point"; diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index af471e70e576..e22fb8cb9938 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -110,7 +110,7 @@ import { VectorSearchProfile, VectorSearchVectorizerKind, WordDelimiterTokenFilter, -} from "./generated/service/models"; +} from "./generated/service/models/index.js"; /** * Options for a list skillsets operation. diff --git a/sdk/search/search-documents/src/serviceUtils.ts b/sdk/search/search-documents/src/serviceUtils.ts index 541b61278975..0109daba17ee 100644 --- a/sdk/search/search-documents/src/serviceUtils.ts +++ b/sdk/search/search-documents/src/serviceUtils.ts @@ -4,7 +4,7 @@ import { SearchResult as GeneratedSearchResult, SuggestDocumentsResult as GeneratedSuggestDocumentsResult, -} from "./generated/data/models"; +} from "./generated/data/models/index.js"; import { AzureOpenAIVectorizer as GeneratedAzureOpenAIVectorizer, BM25Similarity, @@ -43,9 +43,9 @@ import { VectorSearchAlgorithmConfigurationUnion as GeneratedVectorSearchAlgorithmConfiguration, VectorSearchVectorizerUnion as GeneratedVectorSearchVectorizer, WebApiVectorizer as GeneratedWebAPIVectorizer, -} from "./generated/service/models"; -import { SearchResult, SelectFields, SuggestDocumentsResult, SuggestResult } from "./indexModels"; -import { logger } from "./logger"; +} from "./generated/service/models/index.js"; +import { SearchResult, SelectFields, SuggestDocumentsResult, SuggestResult } from "./indexModels.js"; +import { logger } from "./logger.js"; import { AzureOpenAIVectorizer, BlobIndexerDataToExtract, @@ -86,7 +86,7 @@ import { VectorSearchAlgorithmMetric, VectorSearchVectorizer, WebApiVectorizer, -} from "./serviceModels"; +} from "./serviceModels.js"; export const defaultServiceVersion = "2024-07-01"; diff --git a/sdk/search/search-documents/src/synonymMapHelper.ts b/sdk/search/search-documents/src/synonymMapHelper.ts index 6e002f2a7fbc..c7792d5701ac 100644 --- a/sdk/search/search-documents/src/synonymMapHelper.ts +++ b/sdk/search/search-documents/src/synonymMapHelper.ts @@ -3,7 +3,7 @@ import * as fs from "fs"; import { promisify } from "util"; -import { SynonymMap } from "./serviceModels"; +import { SynonymMap } from "./serviceModels.js"; const readFileAsync = promisify(fs.readFile); /** From 8c60b958bbd6588331937e1ad13b315c01e35080 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Tue, 30 Sep 2025 15:59:24 +0000 Subject: [PATCH 03/21] chore(search-documents): fix license header capitalization --- sdk/search/search-documents/src/base64-browser.mts | 2 +- sdk/search/search-documents/src/base64.ts | 2 +- sdk/search/search-documents/src/errorModels.ts | 2 +- sdk/search/search-documents/src/geographyPoint.ts | 2 +- sdk/search/search-documents/src/index.ts | 2 +- sdk/search/search-documents/src/indexDocumentsBatch.ts | 2 +- sdk/search/search-documents/src/indexModels.ts | 2 +- sdk/search/search-documents/src/logger.ts | 2 +- sdk/search/search-documents/src/odata.ts | 2 +- sdk/search/search-documents/src/odataMetadataPolicy.ts | 2 +- sdk/search/search-documents/src/searchApiKeyCredentialPolicy.ts | 2 +- 
sdk/search/search-documents/src/searchAudience.ts | 2 +- sdk/search/search-documents/src/searchClient.ts | 2 +- sdk/search/search-documents/src/searchIndexClient.ts | 2 +- sdk/search/search-documents/src/searchIndexerClient.ts | 2 +- sdk/search/search-documents/src/searchIndexingBufferedSender.ts | 2 +- sdk/search/search-documents/src/serialization.ts | 2 +- sdk/search/search-documents/src/serviceModels.ts | 2 +- sdk/search/search-documents/src/serviceUtils.ts | 2 +- sdk/search/search-documents/src/synonymMapHelper-browser.mts | 2 +- sdk/search/search-documents/src/synonymMapHelper.ts | 2 +- sdk/search/search-documents/src/tracing.ts | 2 +- sdk/search/search-documents/src/walk.ts | 2 +- 23 files changed, 23 insertions(+), 23 deletions(-) diff --git a/sdk/search/search-documents/src/base64-browser.mts b/sdk/search/search-documents/src/base64-browser.mts index 204fbaf0dcee..06c360dc14df 100644 --- a/sdk/search/search-documents/src/base64-browser.mts +++ b/sdk/search/search-documents/src/base64-browser.mts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. /** * Encodes a string in base64 format. diff --git a/sdk/search/search-documents/src/base64.ts b/sdk/search/search-documents/src/base64.ts index c1be83174eff..b8a6a71d98e3 100644 --- a/sdk/search/search-documents/src/base64.ts +++ b/sdk/search/search-documents/src/base64.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. /** * Encodes a string in base64 format. diff --git a/sdk/search/search-documents/src/errorModels.ts b/sdk/search/search-documents/src/errorModels.ts index 34723c353ef1..69090a3e1191 100644 --- a/sdk/search/search-documents/src/errorModels.ts +++ b/sdk/search/search-documents/src/errorModels.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. /** * Common error response for all Azure Resource Manager APIs to return error details for failed diff --git a/sdk/search/search-documents/src/geographyPoint.ts b/sdk/search/search-documents/src/geographyPoint.ts index 6fc07ee7d14a..dd3f8b8ac916 100644 --- a/sdk/search/search-documents/src/geographyPoint.ts +++ b/sdk/search/search-documents/src/geographyPoint.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. const WorldGeodeticSystem1984 = "EPSG:4326"; // See https://epsg.io/4326 diff --git a/sdk/search/search-documents/src/index.ts b/sdk/search/search-documents/src/index.ts index 69460acedcbf..70f856fa2d41 100644 --- a/sdk/search/search-documents/src/index.ts +++ b/sdk/search/search-documents/src/index.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. export { AzureKeyCredential } from "@azure/core-auth"; export { diff --git a/sdk/search/search-documents/src/indexDocumentsBatch.ts b/sdk/search/search-documents/src/indexDocumentsBatch.ts index c7a917e91f8e..2cf314c82a33 100644 --- a/sdk/search/search-documents/src/indexDocumentsBatch.ts +++ b/sdk/search/search-documents/src/indexDocumentsBatch.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. 
import { IndexDocumentsAction } from "./indexModels.js"; diff --git a/sdk/search/search-documents/src/indexModels.ts b/sdk/search/search-documents/src/indexModels.ts index ab0066a4b1eb..025271bb80c4 100644 --- a/sdk/search/search-documents/src/indexModels.ts +++ b/sdk/search/search-documents/src/indexModels.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { OperationOptions } from "@azure/core-client"; import { PagedAsyncIterableIterator } from "@azure/core-paging"; diff --git a/sdk/search/search-documents/src/logger.ts b/sdk/search/search-documents/src/logger.ts index 75335573005d..f3a939cdcde7 100644 --- a/sdk/search/search-documents/src/logger.ts +++ b/sdk/search/search-documents/src/logger.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { createClientLogger } from "@azure/logger"; diff --git a/sdk/search/search-documents/src/odata.ts b/sdk/search/search-documents/src/odata.ts index e8f8273b46cd..a02bd4b88fae 100644 --- a/sdk/search/search-documents/src/odata.ts +++ b/sdk/search/search-documents/src/odata.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. function formatNullAndUndefined(input: unknown): string | unknown { if (input === null || input === undefined) { diff --git a/sdk/search/search-documents/src/odataMetadataPolicy.ts b/sdk/search/search-documents/src/odataMetadataPolicy.ts index c6b873ee83c1..b2a6fa73e17e 100644 --- a/sdk/search/search-documents/src/odataMetadataPolicy.ts +++ b/sdk/search/search-documents/src/odataMetadataPolicy.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { PipelinePolicy, diff --git a/sdk/search/search-documents/src/searchApiKeyCredentialPolicy.ts b/sdk/search/search-documents/src/searchApiKeyCredentialPolicy.ts index 6e3571a4dcab..4f6daee5b21e 100644 --- a/sdk/search/search-documents/src/searchApiKeyCredentialPolicy.ts +++ b/sdk/search/search-documents/src/searchApiKeyCredentialPolicy.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { KeyCredential } from "@azure/core-auth"; import { diff --git a/sdk/search/search-documents/src/searchAudience.ts b/sdk/search/search-documents/src/searchAudience.ts index c7eb679f1197..ebc76f688486 100644 --- a/sdk/search/search-documents/src/searchAudience.ts +++ b/sdk/search/search-documents/src/searchAudience.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. /** * Known values for Search Audience diff --git a/sdk/search/search-documents/src/searchClient.ts b/sdk/search/search-documents/src/searchClient.ts index 687695b12d6a..cc5cd410d3e4 100644 --- a/sdk/search/search-documents/src/searchClient.ts +++ b/sdk/search/search-documents/src/searchClient.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. 
/// diff --git a/sdk/search/search-documents/src/searchIndexClient.ts b/sdk/search/search-documents/src/searchIndexClient.ts index 41c4c1827d40..99f5efb4f60f 100644 --- a/sdk/search/search-documents/src/searchIndexClient.ts +++ b/sdk/search/search-documents/src/searchIndexClient.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. /// diff --git a/sdk/search/search-documents/src/searchIndexerClient.ts b/sdk/search/search-documents/src/searchIndexerClient.ts index f63abbccc7ce..3ee6d917e7a8 100644 --- a/sdk/search/search-documents/src/searchIndexerClient.ts +++ b/sdk/search/search-documents/src/searchIndexerClient.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { isTokenCredential, KeyCredential, TokenCredential } from "@azure/core-auth"; import { InternalClientPipelineOptions } from "@azure/core-client"; diff --git a/sdk/search/search-documents/src/searchIndexingBufferedSender.ts b/sdk/search/search-documents/src/searchIndexingBufferedSender.ts index 4e1c6a21437e..31c7e9565ef1 100644 --- a/sdk/search/search-documents/src/searchIndexingBufferedSender.ts +++ b/sdk/search/search-documents/src/searchIndexingBufferedSender.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { OperationOptions } from "@azure/core-client"; import { RestError } from "@azure/core-rest-pipeline"; diff --git a/sdk/search/search-documents/src/serialization.ts b/sdk/search/search-documents/src/serialization.ts index d8016fc1ecc7..b445c8d7b99f 100644 --- a/sdk/search/search-documents/src/serialization.ts +++ b/sdk/search/search-documents/src/serialization.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import GeographyPoint from "./geographyPoint.js"; import { walk } from "./walk.js"; diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index e22fb8cb9938..2a8f2e41a5f0 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { OperationOptions } from "@azure/core-client"; import { PagedAsyncIterableIterator } from "@azure/core-paging"; diff --git a/sdk/search/search-documents/src/serviceUtils.ts b/sdk/search/search-documents/src/serviceUtils.ts index 0109daba17ee..afc5a0aa475c 100644 --- a/sdk/search/search-documents/src/serviceUtils.ts +++ b/sdk/search/search-documents/src/serviceUtils.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { SearchResult as GeneratedSearchResult, diff --git a/sdk/search/search-documents/src/synonymMapHelper-browser.mts b/sdk/search/search-documents/src/synonymMapHelper-browser.mts index e588a2381fd7..b63fd204bcfa 100644 --- a/sdk/search/search-documents/src/synonymMapHelper-browser.mts +++ b/sdk/search/search-documents/src/synonymMapHelper-browser.mts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. 
import { SynonymMap } from "./serviceModels.js"; diff --git a/sdk/search/search-documents/src/synonymMapHelper.ts b/sdk/search/search-documents/src/synonymMapHelper.ts index c7792d5701ac..bc756125d296 100644 --- a/sdk/search/search-documents/src/synonymMapHelper.ts +++ b/sdk/search/search-documents/src/synonymMapHelper.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import * as fs from "fs"; import { promisify } from "util"; diff --git a/sdk/search/search-documents/src/tracing.ts b/sdk/search/search-documents/src/tracing.ts index 38bff4bae880..6678b4bea44a 100644 --- a/sdk/search/search-documents/src/tracing.ts +++ b/sdk/search/search-documents/src/tracing.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { createTracingClient } from "@azure/core-tracing"; diff --git a/sdk/search/search-documents/src/walk.ts b/sdk/search/search-documents/src/walk.ts index 8d1e12b25fd9..2fac66b1aed4 100644 --- a/sdk/search/search-documents/src/walk.ts +++ b/sdk/search/search-documents/src/walk.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { isDefined } from "@azure/core-util"; From 5cfd29788ebca82aca9d7b70e2ad7f506052562d Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 15:44:09 +0000 Subject: [PATCH 04/21] generate from last stable --- .../search-documents-browser.api.diff.md | 2 +- .../review/search-documents-node.api.md | 732 +----------------- .../src/generated/data/index.ts | 6 +- .../src/generated/data/models/index.ts | 2 +- .../src/generated/data/models/parameters.ts | 2 +- .../generated/data/operations/documents.ts | 10 +- .../src/generated/data/operations/index.ts | 2 +- .../data/operationsInterfaces/documents.ts | 2 +- .../data/operationsInterfaces/index.ts | 2 +- .../src/generated/data/searchClient.ts | 11 +- .../src/generated/service/index.ts | 6 +- .../src/generated/service/models/mappers.ts | 4 +- .../generated/service/models/parameters.ts | 2 +- .../generated/service/operations/aliases.ts | 208 +++++ .../service/operations/dataSources.ts | 10 +- .../src/generated/service/operations/index.ts | 10 +- .../generated/service/operations/indexers.ts | 10 +- .../generated/service/operations/indexes.ts | 10 +- .../generated/service/operations/skillsets.ts | 10 +- .../service/operations/synonymMaps.ts | 10 +- .../service/operationsInterfaces/aliases.ts | 68 ++ .../operationsInterfaces/dataSources.ts | 2 +- .../service/operationsInterfaces/index.ts | 10 +- .../service/operationsInterfaces/indexers.ts | 2 +- .../service/operationsInterfaces/indexes.ts | 2 +- .../service/operationsInterfaces/skillsets.ts | 2 +- .../operationsInterfaces/synonymMaps.ts | 2 +- .../generated/service/searchServiceClient.ts | 12 +- sdk/search/search-documents/swagger/Data.md | 6 +- .../search-documents/swagger/Service.md | 2 +- sdk/search/search-documents/test/README.md | 8 +- .../test/internal/base64.spec.ts | 11 +- .../browser/synonymMap.browser.spec.ts | 11 +- .../test/internal/geographyPoint.spec.ts | 15 +- .../internal/node/synonymMap.node.spec.ts | 9 +- .../test/internal/serialization.spec.ts | 63 +- .../test/internal/serviceUtils.spec.ts | 41 +- .../search-documents/test/narrowedTypes.ts | 18 +- .../test/public/node/searchClient.spec.ts | 253 +++--- .../public/node/searchIndexClient.spec.ts | 64 +- .../test/public/odata.spec.ts | 12 +- 
.../test/public/typeDefinitions.ts | 18 +- .../test/public/utils/interfaces.ts | 4 +- .../test/public/utils/recordedClient.ts | 14 +- .../test/public/utils/setup.ts | 55 +- .../search-documents/test/snippets.spec.ts | 380 +++++++++ 46 files changed, 1062 insertions(+), 1073 deletions(-) create mode 100644 sdk/search/search-documents/src/generated/service/operations/aliases.ts create mode 100644 sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts create mode 100644 sdk/search/search-documents/test/snippets.spec.ts diff --git a/sdk/search/search-documents/review/search-documents-browser.api.diff.md b/sdk/search/search-documents/review/search-documents-browser.api.diff.md index 0a763bc4d14b..b87770e20afc 100644 --- a/sdk/search/search-documents/review/search-documents-browser.api.diff.md +++ b/sdk/search/search-documents/review/search-documents-browser.api.diff.md @@ -7,7 +7,7 @@ For the complete API surface, see the corresponding -node.api.md file. =================================================================== --- NodeJS +++ browser -@@ -450,9 +450,9 @@ +@@ -351,9 +351,9 @@ // @public export type CreateSkillsetOptions = OperationOptions; diff --git a/sdk/search/search-documents/review/search-documents-node.api.md b/sdk/search/search-documents/review/search-documents-node.api.md index c456fddf5c83..7f17157e709c 100644 --- a/sdk/search/search-documents/review/search-documents-node.api.md +++ b/sdk/search/search-documents/review/search-documents-node.api.md @@ -5,47 +5,12 @@ ```ts import { AzureKeyCredential } from '@azure/core-auth'; -import type { ExtendedCommonClientOptions } from '@azure/core-http-compat'; -import type { KeyCredential } from '@azure/core-auth'; -import type { OperationOptions } from '@azure/core-client'; -import type { PagedAsyncIterableIterator } from '@azure/core-paging'; -import type { Pipeline } from '@azure/core-rest-pipeline'; -import type { RestError } from '@azure/core-rest-pipeline'; -import type { TokenCredential } from '@azure/core-auth'; - -// @public -export interface AIServicesAccountIdentity extends BaseCognitiveServicesAccount { - identity?: SearchIndexerDataIdentity; - odatatype: "#Microsoft.Azure.Search.AIServicesByIdentity"; - subdomainUrl: string; -} - -// @public -export interface AIServicesAccountKey extends BaseCognitiveServicesAccount { - key: string; - odatatype: "#Microsoft.Azure.Search.AIServicesByKey"; - subdomainUrl: string; -} - -// @public -export interface AIServicesVisionParameters { - apiKey?: string; - authIdentity?: SearchIndexerDataIdentity; - modelVersion?: string; - resourceUri: string; -} - -// @public -export interface AIServicesVisionVectorizer extends BaseVectorSearchVectorizer { - kind: "aiServicesVision"; - parameters?: AIServicesVisionParameters; -} - -// @public -export type AIStudioModelCatalogName = string; - -// @public -export type AliasIterator = PagedAsyncIterableIterator; +import { ExtendedCommonClientOptions } from '@azure/core-http-compat'; +import { KeyCredential } from '@azure/core-auth'; +import { OperationOptions } from '@azure/core-client'; +import { PagedAsyncIterableIterator } from '@azure/core-paging'; +import { RestError } from '@azure/core-rest-pipeline'; +import { TokenCredential } from '@azure/core-auth'; // @public export interface AnalyzedTokenInfo { @@ -59,7 +24,6 @@ export interface AnalyzedTokenInfo { export interface AnalyzeRequest { analyzerName?: LexicalAnalyzerName; charFilters?: CharFilterName[]; - normalizerName?: LexicalNormalizerName; text: string; 
tokenFilters?: TokenFilterName[]; tokenizerName?: LexicalTokenizerName; @@ -117,26 +81,6 @@ export interface AzureActiveDirectoryApplicationCredentials { export { AzureKeyCredential } -// @public -export interface AzureMachineLearningSkill extends BaseSearchIndexerSkill { - authenticationKey?: string; - degreeOfParallelism?: number; - odatatype: "#Microsoft.Skills.Custom.AmlSkill"; - region?: string; - resourceId?: string; - scoringUri?: string; - timeout?: string; -} - -// @public -export interface AzureMachineLearningVectorizer extends BaseVectorSearchVectorizer { - amlParameters?: AzureMachineLearningVectorizerParameters; - kind: "aml"; -} - -// @public -export type AzureMachineLearningVectorizerParameters = NoAuthAzureMachineLearningVectorizerParameters | KeyAuthAzureMachineLearningVectorizerParameters | TokenAuthAzureMachineLearningVectorizerParameters; - // @public export interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkill, AzureOpenAIParameters { dimensions?: number; @@ -155,24 +99,12 @@ export interface AzureOpenAIParameters { resourceUrl?: string; } -// @public (undocumented) -export interface AzureOpenAITokenizerParameters { - allowedSpecialTokens?: string[]; - encoderModelName?: SplitSkillEncoderModelName; -} - // @public export interface AzureOpenAIVectorizer extends BaseVectorSearchVectorizer { kind: "azureOpenAI"; parameters?: AzureOpenAIParameters; } -// @public -export interface BaseAzureMachineLearningVectorizerParameters { - modelName?: AIStudioModelCatalogName; - timeout?: string; -} - // @public export interface BaseCharFilter { name: string; @@ -182,7 +114,7 @@ export interface BaseCharFilter { // @public export interface BaseCognitiveServicesAccount { description?: string; - odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices" | "#Microsoft.Azure.Search.CognitiveServicesByKey" | "#Microsoft.Azure.Search.AIServicesByKey" | "#Microsoft.Azure.Search.AIServicesByIdentity"; + odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices" | "#Microsoft.Azure.Search.CognitiveServicesByKey"; } // @public @@ -192,7 +124,7 @@ export interface BaseDataChangeDetectionPolicy { // @public export interface BaseDataDeletionDetectionPolicy { - odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" | "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"; + odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"; } // @public @@ -201,12 +133,6 @@ export interface BaseLexicalAnalyzer { odatatype: "#Microsoft.Azure.Search.CustomAnalyzer" | "#Microsoft.Azure.Search.PatternAnalyzer" | "#Microsoft.Azure.Search.StandardAnalyzer" | "#Microsoft.Azure.Search.StopAnalyzer"; } -// @public -export interface BaseLexicalNormalizer { - name: string; - odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; -} - // @public export interface BaseLexicalTokenizer { name: string; @@ -232,7 +158,7 @@ export interface BaseSearchIndexerSkill { description?: string; inputs: InputFieldMappingEntry[]; name?: string; - odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | 
"#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Custom.AmlSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill" | "#Microsoft.Skills.Vision.VectorizeSkill"; + odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"; outputs: OutputFieldMappingEntry[]; } @@ -243,11 +169,9 @@ export interface BaseSearchRequestOptions; sessionId?: string; skip?: number; - speller?: QuerySpeller; top?: number; vectorSearchOptions?: VectorSearchOptions; } @@ -272,11 +195,9 @@ export interface BaseTokenFilter { export interface BaseVectorQuery { exhaustive?: boolean; fields?: SearchFieldArray; - filterOverride?: string; kind: VectorQueryKind; kNearestNeighborsCount?: number; oversampling?: number; - threshold?: VectorThreshold; weight?: number; } @@ -292,8 +213,6 @@ export interface BaseVectorSearchCompression { defaultOversampling?: number; kind: "scalarQuantization" | "binaryQuantization"; rerankWithOriginalVectors?: boolean; - rescoringOptions?: RescoringOptions; - truncationDimension?: number; } // @public @@ -302,11 +221,6 @@ export interface BaseVectorSearchVectorizer { vectorizerName: string; } -// @public -export interface BaseVectorThreshold { - kind: "vectorSimilarity" | "searchScore"; -} - // @public export interface BinaryQuantizationCompression extends BaseVectorSearchCompression { kind: "binaryQuantization"; @@ -359,7 +273,7 @@ export interface ClassicTokenizer extends BaseLexicalTokenizer { } // @public -export type CognitiveServicesAccount = DefaultCognitiveServicesAccount | CognitiveServicesAccountKey | AIServicesAccountKey | AIServicesAccountIdentity; +export type CognitiveServicesAccount = DefaultCognitiveServicesAccount | CognitiveServicesAccountKey; // @public export interface CognitiveServicesAccountKey extends BaseCognitiveServicesAccount { @@ -399,9 +313,6 @@ export interface CorsOptions { // @public export type CountDocumentsOptions = OperationOptions; -// @public -export type CreateAliasOptions = OperationOptions; - // @public export type CreateDataSourceConnectionOptions = OperationOptions; @@ -411,22 +322,14 @@ export type CreateIndexerOptions = OperationOptions; // @public export type CreateIndexOptions = OperationOptions; -// @public -export interface CreateOrUpdateAliasOptions extends OperationOptions { - onlyIfUnchanged?: boolean; -} - // @public export interface CreateorUpdateDataSourceConnectionOptions extends 
OperationOptions { onlyIfUnchanged?: boolean; - skipIndexerResetRequirementForCache?: boolean; } // @public export interface CreateorUpdateIndexerOptions extends OperationOptions { - disableCacheReprocessingChangeDetection?: boolean; onlyIfUnchanged?: boolean; - skipIndexerResetRequirementForCache?: boolean; } // @public @@ -437,9 +340,7 @@ export interface CreateOrUpdateIndexOptions extends OperationOptions { // @public export interface CreateOrUpdateSkillsetOptions extends OperationOptions { - disableCacheReprocessingChangeDetection?: boolean; onlyIfUnchanged?: boolean; - skipIndexerResetRequirementForCache?: boolean; } // @public @@ -503,23 +404,11 @@ export interface CustomEntityLookupSkill extends BaseSearchIndexerSkill { // @public (undocumented) export type CustomEntityLookupSkillLanguage = `${KnownCustomEntityLookupSkillLanguage}`; -// @public -export interface CustomNormalizer extends BaseLexicalNormalizer { - charFilters?: CharFilterName[]; - odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; - tokenFilters?: TokenFilterName[]; -} - // @public export type DataChangeDetectionPolicy = HighWaterMarkChangeDetectionPolicy | SqlIntegratedChangeTrackingPolicy; // @public -export type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy | NativeBlobSoftDeleteDeletionDetectionPolicy; - -// @public -export interface DebugInfo { - readonly queryRewrites?: QueryRewritesDebugInfo; -} +export type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy; // @public export const DEFAULT_BATCH_SIZE: number; @@ -535,11 +424,6 @@ export interface DefaultCognitiveServicesAccount extends BaseCognitiveServicesAc odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices"; } -// @public -export interface DeleteAliasOptions extends OperationOptions { - onlyIfUnchanged?: boolean; -} - // @public export interface DeleteDataSourceConnectionOptions extends OperationOptions { onlyIfUnchanged?: boolean; @@ -590,12 +474,6 @@ export interface DistanceScoringParameters { referencePointParameter: string; } -// @public -export interface DocumentDebugInfo { - readonly semantic?: SemanticDebugInfo; - readonly vectors?: VectorsDebugInfo; -} - // @public export interface DocumentExtractionSkill extends BaseSearchIndexerSkill { configuration?: { @@ -606,19 +484,6 @@ export interface DocumentExtractionSkill extends BaseSearchIndexerSkill { parsingMode?: string; } -// @public -export interface DocumentIntelligenceLayoutSkill extends BaseSearchIndexerSkill { - markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth; - odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill"; - outputMode?: DocumentIntelligenceLayoutSkillOutputMode; -} - -// @public -export type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string; - -// @public -export type DocumentIntelligenceLayoutSkillOutputMode = string; - // @public export interface EdgeNGramTokenFilter { maxGram?: number; @@ -701,7 +566,6 @@ export interface ExtractiveQueryAnswer { // (undocumented) answerType: "extractive"; count?: number; - maxAnswerLength?: number; threshold?: number; } @@ -711,16 +575,12 @@ export interface ExtractiveQueryCaption { captionType: "extractive"; // (undocumented) highlight?: boolean; - maxCaptionLength?: number; } // @public export interface FacetResult { [property: string]: any; readonly count?: number; - readonly facets?: { - [propertyName: string]: FacetResult[]; - }; } // @public @@ -749,12 +609,6 @@ export interface FreshnessScoringParameters { boostingDuration: string; } 
-// @public -export interface GenerativeQueryRewrites { - count?: number; - rewritesType: "generative"; -} - // @public export class GeographyPoint { constructor(geographyPoint: { @@ -766,9 +620,6 @@ export class GeographyPoint { toJSON(): Record; } -// @public -export type GetAliasOptions = OperationOptions; - // @public export type GetDataSourceConnectionOptions = OperationOptions; @@ -818,15 +669,6 @@ export interface HnswParameters { metric?: VectorSearchAlgorithmMetric; } -// @public -export type HybridCountAndFacetMode = string; - -// @public -export interface HybridSearchOptions { - countAndFacetMode?: HybridCountAndFacetMode; - maxTextRecallSize?: number; -} - // @public export interface ImageAnalysisSkill extends BaseSearchIndexerSkill { defaultLanguageCode?: ImageAnalysisSkillLanguage; @@ -888,7 +730,6 @@ export type IndexerExecutionEnvironment = `${KnownIndexerExecutionEnvironment}`; // @public export interface IndexerExecutionResult { - readonly currentState?: IndexerState; readonly endTime?: Date; readonly errorMessage?: string; readonly errors: SearchIndexerError[]; @@ -898,33 +739,15 @@ export interface IndexerExecutionResult { readonly itemCount: number; readonly startTime?: Date; readonly status: IndexerExecutionStatus; - readonly statusDetail?: IndexerExecutionStatusDetail; readonly warnings: SearchIndexerWarning[]; } // @public export type IndexerExecutionStatus = "transientFailure" | "success" | "inProgress" | "reset"; -// @public -export type IndexerExecutionStatusDetail = string; - -// @public -export interface IndexerState { - readonly allDocumentsFinalChangeTrackingState?: string; - readonly allDocumentsInitialChangeTrackingState?: string; - readonly mode?: IndexingMode; - readonly resetDatasourceDocumentIds?: string[]; - readonly resetDocumentKeys?: string[]; - readonly resetDocumentsFinalChangeTrackingState?: string; - readonly resetDocumentsInitialChangeTrackingState?: string; -} - // @public export type IndexerStatus = "unknown" | "error" | "running"; -// @public -export type IndexingMode = string; - // @public export interface IndexingParameters { batchSize?: number; @@ -949,8 +772,6 @@ export interface IndexingParametersConfiguration { imageAction?: BlobIndexerImageAction; indexedFileNameExtensions?: string; indexStorageMetadataOnlyForOversizedDocuments?: boolean; - markdownHeaderDepth?: MarkdownHeaderDepth; - markdownParsingSubmode?: MarkdownParsingSubmode; parsingMode?: BlobIndexerParsingMode; pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm; queryTimeout?: string; @@ -994,13 +815,6 @@ export interface KeepTokenFilter extends BaseTokenFilter { odatatype: "#Microsoft.Azure.Search.KeepTokenFilter"; } -// @public -export interface KeyAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters { - authenticationKey: string; - authKind: "key"; - scoringUri: string; -} - // @public export interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill { defaultLanguageCode?: KeyPhraseExtractionSkillLanguage; @@ -1026,16 +840,6 @@ export interface KeywordTokenizer { odatatype: "#Microsoft.Azure.Search.KeywordTokenizerV2" | "#Microsoft.Azure.Search.KeywordTokenizer"; } -// @public -export enum KnownAIStudioModelCatalogName { - CohereEmbedV3English = "Cohere-embed-v3-english", - CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual", - FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base", - FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant", - 
OpenAIClipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", - OpenAIClipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336" -} - // @public export enum KnownAnalyzerNames { ArLucene = "ar.lucene", @@ -1161,7 +965,6 @@ export enum KnownBlobIndexerParsingMode { Json = "json", JsonArray = "jsonArray", JsonLines = "jsonLines", - Markdown = "markdown", Text = "text" } @@ -1189,21 +992,6 @@ export enum KnownCustomEntityLookupSkillLanguage { Pt = "pt" } -// @public -export enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth { - H1 = "h1", - H2 = "h2", - H3 = "h3", - H4 = "h4", - H5 = "h5", - H6 = "h6" -} - -// @public -export enum KnownDocumentIntelligenceLayoutSkillOutputMode { - OneToMany = "oneToMany" -} - // @public export enum KnownEntityCategory { Datetime = "datetime", @@ -1242,12 +1030,6 @@ export enum KnownEntityRecognitionSkillLanguage { ZhHant = "zh-Hant" } -// @public -export enum KnownHybridCountAndFacetMode { - CountAllResults = "countAllResults", - CountRetrievableResults = "countRetrievableResults" -} - // @public export enum KnownImageAnalysisSkillLanguage { Ar = "ar", @@ -1316,17 +1098,6 @@ export enum KnownIndexerExecutionEnvironment { Standard = "standard" } -// @public -export enum KnownIndexerExecutionStatusDetail { - ResetDocs = "resetDocs" -} - -// @public -export enum KnownIndexingMode { - IndexingAllDocs = "indexingAllDocs", - IndexingResetDocs = "indexingResetDocs" -} - // @public export enum KnownIndexProjectionMode { IncludeIndexingParentDocuments = "includeIndexingParentDocuments", @@ -1353,130 +1124,6 @@ export enum KnownKeyPhraseExtractionSkillLanguage { Sv = "sv" } -// @public -export enum KnownLexicalAnalyzerName { - ArLucene = "ar.lucene", - ArMicrosoft = "ar.microsoft", - BgLucene = "bg.lucene", - BgMicrosoft = "bg.microsoft", - BnMicrosoft = "bn.microsoft", - CaLucene = "ca.lucene", - CaMicrosoft = "ca.microsoft", - CsLucene = "cs.lucene", - CsMicrosoft = "cs.microsoft", - DaLucene = "da.lucene", - DaMicrosoft = "da.microsoft", - DeLucene = "de.lucene", - DeMicrosoft = "de.microsoft", - ElLucene = "el.lucene", - ElMicrosoft = "el.microsoft", - EnLucene = "en.lucene", - EnMicrosoft = "en.microsoft", - EsLucene = "es.lucene", - EsMicrosoft = "es.microsoft", - EtMicrosoft = "et.microsoft", - EuLucene = "eu.lucene", - FaLucene = "fa.lucene", - FiLucene = "fi.lucene", - FiMicrosoft = "fi.microsoft", - FrLucene = "fr.lucene", - FrMicrosoft = "fr.microsoft", - GaLucene = "ga.lucene", - GlLucene = "gl.lucene", - GuMicrosoft = "gu.microsoft", - HeMicrosoft = "he.microsoft", - HiLucene = "hi.lucene", - HiMicrosoft = "hi.microsoft", - HrMicrosoft = "hr.microsoft", - HuLucene = "hu.lucene", - HuMicrosoft = "hu.microsoft", - HyLucene = "hy.lucene", - IdLucene = "id.lucene", - IdMicrosoft = "id.microsoft", - IsMicrosoft = "is.microsoft", - ItLucene = "it.lucene", - ItMicrosoft = "it.microsoft", - JaLucene = "ja.lucene", - JaMicrosoft = "ja.microsoft", - Keyword = "keyword", - KnMicrosoft = "kn.microsoft", - KoLucene = "ko.lucene", - KoMicrosoft = "ko.microsoft", - LtMicrosoft = "lt.microsoft", - LvLucene = "lv.lucene", - LvMicrosoft = "lv.microsoft", - MlMicrosoft = "ml.microsoft", - MrMicrosoft = "mr.microsoft", - MsMicrosoft = "ms.microsoft", - NbMicrosoft = "nb.microsoft", - NlLucene = "nl.lucene", - NlMicrosoft = "nl.microsoft", - NoLucene = "no.lucene", - PaMicrosoft = "pa.microsoft", - Pattern = "pattern", - PlLucene = "pl.lucene", - PlMicrosoft = "pl.microsoft", - 
PtBrLucene = "pt-BR.lucene", - PtBrMicrosoft = "pt-BR.microsoft", - PtPtLucene = "pt-PT.lucene", - PtPtMicrosoft = "pt-PT.microsoft", - RoLucene = "ro.lucene", - RoMicrosoft = "ro.microsoft", - RuLucene = "ru.lucene", - RuMicrosoft = "ru.microsoft", - Simple = "simple", - SkMicrosoft = "sk.microsoft", - SlMicrosoft = "sl.microsoft", - SrCyrillicMicrosoft = "sr-cyrillic.microsoft", - SrLatinMicrosoft = "sr-latin.microsoft", - StandardAsciiFoldingLucene = "standardasciifolding.lucene", - StandardLucene = "standard.lucene", - Stop = "stop", - SvLucene = "sv.lucene", - SvMicrosoft = "sv.microsoft", - TaMicrosoft = "ta.microsoft", - TeMicrosoft = "te.microsoft", - ThLucene = "th.lucene", - ThMicrosoft = "th.microsoft", - TrLucene = "tr.lucene", - TrMicrosoft = "tr.microsoft", - UkMicrosoft = "uk.microsoft", - UrMicrosoft = "ur.microsoft", - ViMicrosoft = "vi.microsoft", - Whitespace = "whitespace", - ZhHansLucene = "zh-Hans.lucene", - ZhHansMicrosoft = "zh-Hans.microsoft", - ZhHantLucene = "zh-Hant.lucene", - ZhHantMicrosoft = "zh-Hant.microsoft" -} - -// @public -enum KnownLexicalNormalizerName { - AsciiFolding = "asciifolding", - Elision = "elision", - Lowercase = "lowercase", - Standard = "standard", - Uppercase = "uppercase" -} -export { KnownLexicalNormalizerName } -export { KnownLexicalNormalizerName as KnownNormalizerNames } - -// @public -export enum KnownMarkdownHeaderDepth { - H1 = "h1", - H2 = "h2", - H3 = "h3", - H4 = "h4", - H5 = "h5", - H6 = "h6" -} - -// @public -export enum KnownMarkdownParsingSubmode { - OneToMany = "oneToMany", - OneToOne = "oneToOne" -} - // @public export enum KnownOcrLineEnding { CarriageReturn = "carriageReturn", @@ -1665,97 +1312,6 @@ export enum KnownPIIDetectionSkillMaskingMode { Replace = "replace" } -// @public -export enum KnownQueryDebugMode { - All = "all", - Disabled = "disabled", - QueryRewrites = "queryRewrites", - Semantic = "semantic", - Vector = "vector" -} - -// @public -export enum KnownQueryLanguage { - ArEg = "ar-eg", - ArJo = "ar-jo", - ArKw = "ar-kw", - ArMa = "ar-ma", - ArSa = "ar-sa", - BgBg = "bg-bg", - BnIn = "bn-in", - CaEs = "ca-es", - CsCz = "cs-cz", - DaDk = "da-dk", - DeDe = "de-de", - ElGr = "el-gr", - EnAu = "en-au", - EnCa = "en-ca", - EnGb = "en-gb", - EnIn = "en-in", - EnUs = "en-us", - EsEs = "es-es", - EsMx = "es-mx", - EtEe = "et-ee", - EuEs = "eu-es", - FaAe = "fa-ae", - FiFi = "fi-fi", - FrCa = "fr-ca", - FrFr = "fr-fr", - GaIe = "ga-ie", - GlEs = "gl-es", - GuIn = "gu-in", - HeIl = "he-il", - HiIn = "hi-in", - HrBa = "hr-ba", - HrHr = "hr-hr", - HuHu = "hu-hu", - HyAm = "hy-am", - IdId = "id-id", - IsIs = "is-is", - ItIt = "it-it", - JaJp = "ja-jp", - KnIn = "kn-in", - KoKr = "ko-kr", - LtLt = "lt-lt", - LvLv = "lv-lv", - MlIn = "ml-in", - MrIn = "mr-in", - MsBn = "ms-bn", - MsMy = "ms-my", - NbNo = "nb-no", - NlBe = "nl-be", - NlNl = "nl-nl", - None = "none", - NoNo = "no-no", - PaIn = "pa-in", - PlPl = "pl-pl", - PtBr = "pt-br", - PtPt = "pt-pt", - RoRo = "ro-ro", - RuRu = "ru-ru", - SkSk = "sk-sk", - SlSl = "sl-sl", - SrBa = "sr-ba", - SrMe = "sr-me", - SrRs = "sr-rs", - SvSe = "sv-se", - TaIn = "ta-in", - TeIn = "te-in", - ThTh = "th-th", - TrTr = "tr-tr", - UkUa = "uk-ua", - UrPk = "ur-pk", - ViVn = "vi-vn", - ZhCn = "zh-cn", - ZhTw = "zh-tw" -} - -// @public -export enum KnownQuerySpeller { - Lexicon = "lexicon", - None = "none" -} - // @public export enum KnownRegexFlags { CanonEq = "CANON_EQ", @@ -1799,8 +1355,7 @@ export enum KnownSearchIndexerDataSourceType { AzureSql = "azuresql", AzureTable = 
"azuretable", CosmosDb = "cosmosdb", - MySql = "mysql", - OneLake = "onelake" + MySql = "mysql" } // @public @@ -1816,18 +1371,6 @@ export enum KnownSemanticErrorReason { Transient = "transient" } -// @public -export enum KnownSemanticFieldState { - Partial = "partial", - Unused = "unused", - Used = "used" -} - -// @public -export enum KnownSemanticQueryRewritesResultType { - OriginalQueryOnly = "originalQueryOnly" -} - // @public export enum KnownSemanticSearchResultsType { BaseResults = "baseResults", @@ -1853,14 +1396,6 @@ export enum KnownSentimentSkillLanguage { Tr = "tr" } -// @public -export enum KnownSplitSkillEncoderModelName { - CL100KBase = "cl100k_base", - P50KBase = "p50k_base", - P50KEdit = "p50k_edit", - R50KBase = "r50k_base" -} - // @public export enum KnownSplitSkillLanguage { Am = "am", @@ -1898,12 +1433,6 @@ export enum KnownSplitSkillLanguage { Zh = "zh" } -// @public -export enum KnownSplitSkillUnit { - AzureOpenAITokens = "azureOpenAITokens", - Characters = "characters" -} - // @public export enum KnownTextSplitMode { Pages = "pages", @@ -2054,9 +1583,7 @@ export enum KnownVectorFilterMode { // @public export enum KnownVectorQueryKind { - ImageBinary = "imageBinary", - ImageUrl = "imageUrl", - Text = "text", + $DO_NOT_NORMALIZE$_text = "text", Vector = "vector" } @@ -2080,12 +1607,6 @@ export enum KnownVectorSearchCompressionKind { ScalarQuantization = "scalarQuantization" } -// @public -export enum KnownVectorSearchCompressionRescoreStorageMethod { - DiscardOriginals = "discardOriginals", - PreserveOriginals = "preserveOriginals" -} - // @public export enum KnownVectorSearchCompressionTarget { Int8 = "int8" @@ -2093,18 +1614,10 @@ export enum KnownVectorSearchCompressionTarget { // @public export enum KnownVectorSearchVectorizerKind { - AIServicesVision = "aiServicesVision", - AML = "aml", AzureOpenAI = "azureOpenAI", CustomWebApi = "customWebApi" } -// @public -export enum KnownVectorThresholdKind { - SearchScore = "searchScore", - VectorSimilarity = "vectorSimilarity" -} - // @public export enum KnownVisualFeature { Adult = "adult", @@ -2136,12 +1649,6 @@ export type LexicalAnalyzer = CustomAnalyzer | PatternAnalyzer | LuceneStandardA // @public export type LexicalAnalyzerName = string; -// @public -export type LexicalNormalizer = CustomNormalizer; - -// @public -export type LexicalNormalizerName = string; - // @public export type LexicalTokenizer = ClassicTokenizer | EdgeNGramTokenizer | KeywordTokenizer | MicrosoftLanguageTokenizer | MicrosoftLanguageStemmingTokenizer | NGramTokenizer | PathHierarchyTokenizer | PatternTokenizer | LuceneStandardTokenizer | UaxUrlEmailTokenizer; @@ -2155,9 +1662,6 @@ export interface LimitTokenFilter extends BaseTokenFilter { odatatype: "#Microsoft.Azure.Search.LimitTokenFilter"; } -// @public -export type ListAliasesOptions = OperationOptions; - // @public export type ListDataSourceConnectionsOptions = OperationOptions; @@ -2211,12 +1715,6 @@ export interface MappingCharFilter extends BaseCharFilter { odatatype: "#Microsoft.Azure.Search.MappingCharFilter"; } -// @public -export type MarkdownHeaderDepth = string; - -// @public -export type MarkdownParsingSubmode = string; - // @public export type MergeDocumentsOptions = IndexDocumentsOptions; @@ -2255,11 +1753,6 @@ export type MicrosoftTokenizerLanguage = "bangla" | "bulgarian" | "catalan" | "c // @public export type NarrowedModel = SelectFields> = (() => T extends TModel ? true : false) extends () => T extends never ? true : false ? TModel : (() => T extends TModel ? 
true : false) extends () => T extends object ? true : false ? TModel : (() => T extends TModel ? true : false) extends () => T extends any ? true : false ? TModel : (() => T extends TModel ? true : false) extends () => T extends unknown ? true : false ? TModel : (() => T extends TFields ? true : false) extends () => T extends never ? true : false ? never : (() => T extends TFields ? true : false) extends () => T extends SelectFields ? true : false ? TModel : SearchPick; -// @public -export interface NativeBlobSoftDeleteDeletionDetectionPolicy extends BaseDataDeletionDetectionPolicy { - odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"; -} - // @public export interface NGramTokenFilter { maxGram?: number; @@ -2276,19 +1769,12 @@ export interface NGramTokenizer extends BaseLexicalTokenizer { tokenChars?: TokenCharacterKind[]; } -// @public -export interface NoAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters { - authKind: "none"; - scoringUri: string; -} - // @public export type OcrLineEnding = string; // @public export interface OcrSkill extends BaseSearchIndexerSkill { defaultLanguageCode?: OcrSkillLanguage; - lineEnding?: OcrLineEnding; odatatype: "#Microsoft.Skills.Vision.OcrSkill"; shouldDetectOrientation?: boolean; } @@ -2402,80 +1888,15 @@ export interface QueryCaptionResult { readonly text?: string; } -// @public -export type QueryDebugMode = string; - -// @public -export type QueryLanguage = string; - -// @public -export interface QueryResultDocumentRerankerInput { - readonly content?: string; - readonly keywords?: string; - readonly title?: string; -} - -// @public -export interface QueryResultDocumentSemanticField { - readonly name?: string; - readonly state?: SemanticFieldState; -} - -// @public -export interface QueryResultDocumentSubscores { - readonly documentBoost?: number; - readonly text?: TextResult; - readonly vectors?: { - [propertyName: string]: SingleVectorFieldResult; - }[]; -} - -// @public -export type QueryRewrites = GenerativeQueryRewrites; - -// @public -export interface QueryRewritesDebugInfo { - readonly text?: QueryRewritesValuesDebugInfo; - readonly vectors?: QueryRewritesValuesDebugInfo[]; -} - -// @public -export interface QueryRewritesValuesDebugInfo { - readonly inputQuery?: string; - readonly rewrites?: string[]; -} - -// @public -export type QuerySpeller = string; - // @public export type QueryType = "simple" | "full" | "semantic"; // @public (undocumented) export type RegexFlags = `${KnownRegexFlags}`; -// @public -export interface RescoringOptions { - defaultOversampling?: number; - enableRescoring?: boolean; - rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod; -} - -// @public -export interface ResetDocumentsOptions extends OperationOptions { - datasourceDocumentIds?: string[]; - documentKeys?: string[]; - overwrite?: boolean; -} - // @public export type ResetIndexerOptions = OperationOptions; -// @public -export interface ResetSkillsOptions extends OperationOptions { - skillNames?: string[]; -} - // @public export interface ResourceCounter { quota?: number; @@ -2516,13 +1937,6 @@ export interface ScoringProfile { // @public export type ScoringStatistics = "local" | "global"; -// @public -export interface SearchAlias { - etag?: string; - indexes: string[]; - name: string; -} - // @public export class SearchClient implements IndexDocumentsClient { constructor(endpoint: string, indexName: string, credential: KeyCredential | TokenCredential, options?: 
SearchClientOptions); @@ -2538,7 +1952,6 @@ export class SearchClient implements IndexDocumentsClient readonly indexName: string; mergeDocuments(documents: TModel[], options?: MergeDocumentsOptions): Promise; mergeOrUploadDocuments(documents: TModel[], options?: MergeOrUploadDocumentsOptions): Promise; - readonly pipeline: Pipeline; search>(searchText?: string, options?: SearchOptions): Promise>; readonly serviceVersion: string; suggest = never>(searchText: string, suggesterName: string, options?: SuggestOptions): Promise>; @@ -2569,12 +1982,10 @@ export interface SearchDocumentsResultBase { readonly answers?: QueryAnswerResult[]; readonly count?: number; readonly coverage?: number; - readonly debugInfo?: DebugInfo; readonly facets?: { [propertyName: string]: FacetResult[]; }; readonly semanticErrorReason?: SemanticErrorReason; - readonly semanticQueryRewritesResultType?: SemanticQueryRewritesResultType; readonly semanticSearchResultsType?: SemanticSearchResultsType; } @@ -2597,7 +2008,6 @@ export interface SearchIndex { etag?: string; fields: SearchField[]; name: string; - normalizers?: LexicalNormalizer[]; scoringProfiles?: ScoringProfile[]; semanticSearch?: SemanticSearch; similarity?: SimilarityAlgorithm; @@ -2607,37 +2017,28 @@ export interface SearchIndex { vectorSearch?: VectorSearch; } -// @public -export type SearchIndexAlias = SearchAlias; - // @public export class SearchIndexClient { constructor(endpoint: string, credential: KeyCredential | TokenCredential, options?: SearchIndexClientOptions); analyzeText(indexName: string, options: AnalyzeTextOptions): Promise; // @deprecated readonly apiVersion: string; - createAlias(alias: SearchIndexAlias, options?: CreateAliasOptions): Promise; createIndex(index: SearchIndex, options?: CreateIndexOptions): Promise; - createOrUpdateAlias(alias: SearchIndexAlias, options?: CreateOrUpdateAliasOptions): Promise; createOrUpdateIndex(index: SearchIndex, options?: CreateOrUpdateIndexOptions): Promise; createOrUpdateSynonymMap(synonymMap: SynonymMap, options?: CreateOrUpdateSynonymMapOptions): Promise; createSynonymMap(synonymMap: SynonymMap, options?: CreateSynonymMapOptions): Promise; - deleteAlias(alias: string | SearchIndexAlias, options?: DeleteAliasOptions): Promise; deleteIndex(index: string | SearchIndex, options?: DeleteIndexOptions): Promise; deleteSynonymMap(synonymMap: string | SynonymMap, options?: DeleteSynonymMapOptions): Promise; readonly endpoint: string; - getAlias(aliasName: string, options?: GetAliasOptions): Promise; getIndex(indexName: string, options?: GetIndexOptions): Promise; getIndexStatistics(indexName: string, options?: GetIndexStatisticsOptions): Promise; getSearchClient(indexName: string, options?: SearchClientOptions): SearchClient; getServiceStatistics(options?: GetServiceStatisticsOptions): Promise; getSynonymMap(synonymMapName: string, options?: GetSynonymMapsOptions): Promise; - listAliases(options?: ListAliasesOptions): AliasIterator; listIndexes(options?: ListIndexesOptions): IndexIterator; listIndexesNames(options?: ListIndexesOptions): IndexNameIterator; listSynonymMaps(options?: ListSynonymMapsOptions): Promise>; listSynonymMapsNames(options?: ListSynonymMapsOptions): Promise>; - readonly pipeline: Pipeline; readonly serviceVersion: string; } @@ -2651,7 +2052,6 @@ export interface SearchIndexClientOptions extends ExtendedCommonClientOptions { // @public export interface SearchIndexer { - cache?: SearchIndexerCache; dataSourceName: string; description?: string; encryptionKey?: SearchResourceEncryptionKey; @@ 
-2666,13 +2066,6 @@ export interface SearchIndexer { targetIndexName: string; } -// @public (undocumented) -export interface SearchIndexerCache { - enableReprocessing?: boolean; - identity?: SearchIndexerDataIdentity; - storageConnectionString?: string; -} - // @public export class SearchIndexerClient { constructor(endpoint: string, credential: KeyCredential | TokenCredential, options?: SearchIndexerClientOptions); @@ -2698,10 +2091,7 @@ export class SearchIndexerClient { listIndexersNames(options?: ListIndexersOptions): Promise>; listSkillsets(options?: ListSkillsetsOptions): Promise>; listSkillsetsNames(options?: ListSkillsetsOptions): Promise>; - readonly pipeline: Pipeline; - resetDocuments(indexerName: string, options?: ResetDocumentsOptions): Promise; resetIndexer(indexerName: string, options?: ResetIndexerOptions): Promise; - resetSkills(skillsetName: string, options?: ResetSkillsOptions): Promise; runIndexer(indexerName: string, options?: RunIndexerOptions): Promise; readonly serviceVersion: string; } @@ -2836,7 +2226,7 @@ export interface SearchIndexerLimits { } // @public -export type SearchIndexerSkill = AzureMachineLearningSkill | AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | DocumentIntelligenceLayoutSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | VisionVectorizeSkill | WebApiSkill; +export type SearchIndexerSkill = AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | WebApiSkill; // @public export interface SearchIndexerSkillset { @@ -2971,15 +2361,8 @@ export type SearchResult; - readonly documentDebugInfo?: DocumentDebugInfo; }; -// @public -export interface SearchScoreThreshold extends BaseVectorThreshold { - kind: "searchScore"; - value: number; -} - // @public export interface SearchServiceStatistics { counters: ServiceCounters; @@ -3007,14 +2390,6 @@ export interface SemanticConfiguration { prioritizedFields: SemanticPrioritizedFields; } -// @public -export interface SemanticDebugInfo { - readonly contentFields?: QueryResultDocumentSemanticField[]; - readonly keywordFields?: QueryResultDocumentSemanticField[]; - readonly rerankerInput?: QueryResultDocumentRerankerInput; - readonly titleField?: QueryResultDocumentSemanticField; -} - // @public (undocumented) export type SemanticErrorMode = `${KnownSemanticErrorMode}`; @@ -3027,9 +2402,6 @@ export interface SemanticField { name: string; } -// @public -export type SemanticFieldState = string; - // @public export interface SemanticPrioritizedFields { contentFields?: SemanticField[]; @@ -3037,9 +2409,6 @@ export interface SemanticPrioritizedFields { titleField?: SemanticField; } -// @public -export type SemanticQueryRewritesResultType = string; - // @public export interface SemanticSearch { configurations?: SemanticConfiguration[]; @@ -3051,11 +2420,8 @@ export interface SemanticSearchOptions { answers?: QueryAnswer; captions?: QueryCaption; configurationName?: string; - debugMode?: QueryDebugMode; errorMode?: 
SemanticErrorMode; maxWaitInMilliseconds?: number; - queryRewrites?: QueryRewrites; - semanticFields?: string[]; semanticQuery?: string; } @@ -3081,7 +2447,6 @@ export interface SentimentSkillV3 extends BaseSearchIndexerSkill { // @public export interface ServiceCounters { - aliasCounter: ResourceCounter; dataSourceCounter: ResourceCounter; documentCounter: ResourceCounter; indexCounter: ResourceCounter; @@ -3134,7 +2499,6 @@ export interface SimpleField { indexAnalyzerName?: LexicalAnalyzerName; key?: boolean; name: string; - normalizerName?: LexicalNormalizerName; searchable?: boolean; searchAnalyzerName?: LexicalAnalyzerName; sortable?: boolean; @@ -3146,12 +2510,6 @@ export interface SimpleField { vectorSearchProfileName?: string; } -// @public -export interface SingleVectorFieldResult { - readonly searchScore?: number; - readonly vectorSimilarity?: number; -} - // @public export interface SnowballTokenFilter extends BaseTokenFilter { language: SnowballTokenFilterLanguage; @@ -3170,25 +2528,15 @@ export interface SoftDeleteColumnDeletionDetectionPolicy extends BaseDataDeletio // @public export interface SplitSkill extends BaseSearchIndexerSkill { - azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters; defaultLanguageCode?: SplitSkillLanguage; - maximumPagesToTake?: number; maxPageLength?: number; odatatype: "#Microsoft.Skills.Text.SplitSkill"; - pageOverlapLength?: number; textSplitMode?: TextSplitMode; - unit?: SplitSkillUnit; } -// @public -export type SplitSkillEncoderModelName = string; - // @public (undocumented) export type SplitSkillLanguage = `${KnownSplitSkillLanguage}`; -// @public -export type SplitSkillUnit = string; - // @public export interface SqlIntegratedChangeTrackingPolicy extends BaseDataChangeDetectionPolicy { odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"; @@ -3285,11 +2633,6 @@ export interface TagScoringParameters { tagsParameter: string; } -// @public -export interface TextResult { - readonly searchScore?: number; -} - // @public (undocumented) export type TextSplitMode = `${KnownTextSplitMode}`; @@ -3311,13 +2654,6 @@ export interface TextWeights { }; } -// @public -export interface TokenAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters { - authKind: "token"; - region?: string; - resourceId: string; -} - // @public export type TokenCharacterKind = "letter" | "digit" | "whitespace" | "punctuation" | "symbol"; @@ -3357,22 +2693,9 @@ export type VectorEncodingFormat = string; // @public (undocumented) export type VectorFilterMode = `${KnownVectorFilterMode}`; -// @public -export interface VectorizableImageBinaryQuery extends BaseVectorQuery { - binaryImage: string; - kind: "imageBinary"; -} - -// @public -export interface VectorizableImageUrlQuery extends BaseVectorQuery { - kind: "imageUrl"; - url: string; -} - // @public export interface VectorizableTextQuery extends BaseVectorQuery { kind: "text"; - queryRewrites?: QueryRewrites; text: string; } @@ -3383,16 +2706,11 @@ export interface VectorizedQuery extends BaseVectorQuery< } // @public -export type VectorQuery = VectorizedQuery | VectorizableTextQuery | VectorizableImageUrlQuery | VectorizableImageBinaryQuery; +export type VectorQuery = VectorizedQuery | VectorizableTextQuery; // @public (undocumented) export type VectorQueryKind = `${KnownVectorQueryKind}`; -// @public (undocumented) -export interface VectorsDebugInfo { - readonly subscores?: QueryResultDocumentSubscores; -} - // @public export interface VectorSearch { 
algorithms?: VectorSearchAlgorithmConfiguration[]; @@ -3416,9 +2734,6 @@ export type VectorSearchCompression = BinaryQuantizationCompression | ScalarQuan // @public export type VectorSearchCompressionKind = string; -// @public -export type VectorSearchCompressionRescoreStorageMethod = string; - // @public export type VectorSearchCompressionTarget = string; @@ -3437,26 +2752,11 @@ export interface VectorSearchProfile { } // @public -export type VectorSearchVectorizer = AIServicesVisionVectorizer | AzureMachineLearningVectorizer | AzureOpenAIVectorizer | WebApiVectorizer; +export type VectorSearchVectorizer = AzureOpenAIVectorizer | WebApiVectorizer; // @public export type VectorSearchVectorizerKind = string; -// @public -export interface VectorSimilarityThreshold extends BaseVectorThreshold { - kind: "vectorSimilarity"; - value: number; -} - -// @public -export type VectorThreshold = VectorSimilarityThreshold | SearchScoreThreshold; - -// @public -export interface VisionVectorizeSkill extends BaseSearchIndexerSkill { - modelVersion?: string; - odatatype: "#Microsoft.Skills.Vision.VectorizeSkill"; -} - // @public (undocumented) export type VisualFeature = `${KnownVisualFeature}`; diff --git a/sdk/search/search-documents/src/generated/data/index.ts b/sdk/search/search-documents/src/generated/data/index.ts index f3df3736e075..2bee12aaf341 100644 --- a/sdk/search/search-documents/src/generated/data/index.ts +++ b/sdk/search/search-documents/src/generated/data/index.ts @@ -6,6 +6,6 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -export * from "./models"; -export { SearchClient } from "./searchClient"; -export * from "./operationsInterfaces"; +export * from "./models/index.js"; +export { SearchClient } from "./searchClient.js"; +export * from "./operationsInterfaces/index.js"; diff --git a/sdk/search/search-documents/src/generated/data/models/index.ts b/sdk/search/search-documents/src/generated/data/models/index.ts index e7aebcf7b22d..37c8e7e053bb 100644 --- a/sdk/search/search-documents/src/generated/data/models/index.ts +++ b/sdk/search/search-documents/src/generated/data/models/index.ts @@ -599,7 +599,7 @@ export enum KnownVectorQueryKind { /** Vector query where a raw vector value is provided. */ Vector = "vector", /** Vector query where a text value that needs to be vectorized is provided. */ - Text = "text", + $DO_NOT_NORMALIZE$_text = "text", } /** diff --git a/sdk/search/search-documents/src/generated/data/models/parameters.ts b/sdk/search/search-documents/src/generated/data/models/parameters.ts index a4822b2e3931..a907865abac9 100644 --- a/sdk/search/search-documents/src/generated/data/models/parameters.ts +++ b/sdk/search/search-documents/src/generated/data/models/parameters.ts @@ -16,7 +16,7 @@ import { SuggestRequest as SuggestRequestMapper, IndexBatch as IndexBatchMapper, AutocompleteRequest as AutocompleteRequestMapper, -} from "../models/mappers"; +} from "../models/mappers.js"; export const accept: OperationParameter = { parameterPath: "accept", diff --git a/sdk/search/search-documents/src/generated/data/operations/documents.ts b/sdk/search/search-documents/src/generated/data/operations/documents.ts index 301b5da08027..d79dae48a486 100644 --- a/sdk/search/search-documents/src/generated/data/operations/documents.ts +++ b/sdk/search/search-documents/src/generated/data/operations/documents.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ -import { Documents } from "../operationsInterfaces"; +import { Documents } from "../operationsInterfaces/index.js"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers"; -import * as Parameters from "../models/parameters"; -import { SearchClient } from "../searchClient"; +import * as Mappers from "../models/mappers.js"; +import * as Parameters from "../models/parameters.js"; +import { SearchClient } from "../searchClient.js"; import { DocumentsCountOptionalParams, DocumentsCountResponse, @@ -34,7 +34,7 @@ import { AutocompleteRequest, DocumentsAutocompletePostOptionalParams, DocumentsAutocompletePostResponse, -} from "../models"; +} from "../models/index.js"; /** Class containing Documents operations. */ export class DocumentsImpl implements Documents { diff --git a/sdk/search/search-documents/src/generated/data/operations/index.ts b/sdk/search/search-documents/src/generated/data/operations/index.ts index 77c96e3f8b79..e6fde9effe60 100644 --- a/sdk/search/search-documents/src/generated/data/operations/index.ts +++ b/sdk/search/search-documents/src/generated/data/operations/index.ts @@ -6,4 +6,4 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -export * from "./documents"; +export * from "./documents.js"; diff --git a/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts b/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts index 2cedcc7c4163..cf365fcb51c8 100644 --- a/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts +++ b/sdk/search/search-documents/src/generated/data/operationsInterfaces/documents.ts @@ -29,7 +29,7 @@ import { AutocompleteRequest, DocumentsAutocompletePostOptionalParams, DocumentsAutocompletePostResponse, -} from "../models"; +} from "../models/index.js"; /** Interface representing a Documents. */ export interface Documents { diff --git a/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts b/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts index 77c96e3f8b79..e6fde9effe60 100644 --- a/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts +++ b/sdk/search/search-documents/src/generated/data/operationsInterfaces/index.ts @@ -6,4 +6,4 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ -export * from "./documents"; +export * from "./documents.js"; diff --git a/sdk/search/search-documents/src/generated/data/searchClient.ts b/sdk/search/search-documents/src/generated/data/searchClient.ts index 7773e7490bac..10bfa133754c 100644 --- a/sdk/search/search-documents/src/generated/data/searchClient.ts +++ b/sdk/search/search-documents/src/generated/data/searchClient.ts @@ -12,9 +12,12 @@ import { PipelineResponse, SendRequest, } from "@azure/core-rest-pipeline"; -import { DocumentsImpl } from "./operations"; -import { Documents } from "./operationsInterfaces"; -import { ApiVersion20240701, SearchClientOptionalParams } from "./models"; +import { DocumentsImpl } from "./operations/index.js"; +import { Documents } from "./operationsInterfaces/index.js"; +import { + ApiVersion20240701, + SearchClientOptionalParams, +} from "./models/index.js"; /** @internal */ export class SearchClient extends coreHttpCompat.ExtendedServiceClient { @@ -53,7 +56,7 @@ export class SearchClient extends coreHttpCompat.ExtendedServiceClient { requestContentType: "application/json; charset=utf-8", }; - const packageDetails = `azsdk-js-search-documents/12.1.0`; + const packageDetails = `azsdk-js-search-documents/12.2.0-beta.2`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix ? `${options.userAgentOptions.userAgentPrefix} ${packageDetails}` diff --git a/sdk/search/search-documents/src/generated/service/index.ts b/sdk/search/search-documents/src/generated/service/index.ts index 707b92eedf04..275645f04104 100644 --- a/sdk/search/search-documents/src/generated/service/index.ts +++ b/sdk/search/search-documents/src/generated/service/index.ts @@ -6,6 +6,6 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ -export * from "./models"; -export { SearchServiceClient } from "./searchServiceClient"; -export * from "./operationsInterfaces"; +export * from "./models/index.js"; +export { SearchServiceClient } from "./searchServiceClient.js"; +export * from "./operationsInterfaces/index.js"; diff --git a/sdk/search/search-documents/src/generated/service/models/mappers.ts b/sdk/search/search-documents/src/generated/service/models/mappers.ts index 7c580008a25c..fc8bfb778c7b 100644 --- a/sdk/search/search-documents/src/generated/service/models/mappers.ts +++ b/sdk/search/search-documents/src/generated/service/models/mappers.ts @@ -4066,7 +4066,7 @@ export const PatternAnalyzer: coreClient.CompositeMapper = { }, }, pattern: { - defaultValue: "W+", + defaultValue: "\\W+", serializedName: "pattern", type: { name: "String", @@ -4543,7 +4543,7 @@ export const PatternTokenizer: coreClient.CompositeMapper = { modelProperties: { ...LexicalTokenizer.type.modelProperties, pattern: { - defaultValue: "W+", + defaultValue: "\\W+", serializedName: "pattern", type: { name: "String", diff --git a/sdk/search/search-documents/src/generated/service/models/parameters.ts b/sdk/search/search-documents/src/generated/service/models/parameters.ts index 12db0fcabf41..957601452128 100644 --- a/sdk/search/search-documents/src/generated/service/models/parameters.ts +++ b/sdk/search/search-documents/src/generated/service/models/parameters.ts @@ -18,7 +18,7 @@ import { SynonymMap as SynonymMapMapper, SearchIndex as SearchIndexMapper, AnalyzeRequest as AnalyzeRequestMapper, -} from "../models/mappers"; +} from "../models/mappers.js"; export const contentType: OperationParameter = { parameterPath: ["options", "contentType"], diff --git a/sdk/search/search-documents/src/generated/service/operations/aliases.ts b/sdk/search/search-documents/src/generated/service/operations/aliases.ts new file mode 100644 index 000000000000..0c250f23b9c5 --- /dev/null +++ b/sdk/search/search-documents/src/generated/service/operations/aliases.ts @@ -0,0 +1,208 @@ +// /* +// * Copyright (c) Microsoft Corporation. +// * Licensed under the MIT License. +// * +// * Code generated by Microsoft (R) AutoRest Code Generator. +// * Changes may cause incorrect behavior and will be lost if the code is regenerated. +// */ + +// import { Aliases } from "../operationsInterfaces/index.js"; +// import * as coreClient from "@azure/core-client"; +// import * as Mappers from "../models/mappers.js"; +// import * as Parameters from "../models/parameters.js"; +// import { SearchServiceClient } from "../searchServiceClient.js"; +// import { +// SearchAlias, +// AliasesCreateOptionalParams, +// AliasesCreateResponse, +// AliasesListOptionalParams, +// AliasesListResponse, +// AliasesCreateOrUpdateOptionalParams, +// AliasesCreateOrUpdateResponse, +// AliasesDeleteOptionalParams, +// AliasesGetOptionalParams, +// AliasesGetResponse, +// } from "../models/index.js"; + +// /** Class containing Aliases operations. */ +// export class AliasesImpl implements Aliases { +// private readonly client: SearchServiceClient; + +// /** +// * Initialize a new instance of the class Aliases class. +// * @param client Reference to the service client +// */ +// constructor(client: SearchServiceClient) { +// this.client = client; +// } + +// /** +// * Creates a new search alias. +// * @param alias The definition of the alias to create. +// * @param options The options parameters.
+// */ +// create( +// alias: SearchAlias, +// options?: AliasesCreateOptionalParams, +// ): Promise { +// return this.client.sendOperationRequest( +// { alias, options }, +// createOperationSpec, +// ); +// } + +// /** +// * Lists all aliases available for a search service. +// * @param options The options parameters. +// */ +// list(options?: AliasesListOptionalParams): Promise { +// return this.client.sendOperationRequest({ options }, listOperationSpec); +// } + +// /** +// * Creates a new search alias or updates an alias if it already exists. +// * @param aliasName The definition of the alias to create or update. +// * @param alias The definition of the alias to create or update. +// * @param options The options parameters. +// */ +// createOrUpdate( +// aliasName: string, +// alias: SearchAlias, +// options?: AliasesCreateOrUpdateOptionalParams, +// ): Promise { +// return this.client.sendOperationRequest( +// { aliasName, alias, options }, +// createOrUpdateOperationSpec, +// ); +// } + +// /** +// * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no +// * recovery option. The mapped index is untouched by this operation. +// * @param aliasName The name of the alias to delete. +// * @param options The options parameters. +// */ +// delete( +// aliasName: string, +// options?: AliasesDeleteOptionalParams, +// ): Promise { +// return this.client.sendOperationRequest( +// { aliasName, options }, +// deleteOperationSpec, +// ); +// } + +// /** +// * Retrieves an alias definition. +// * @param aliasName The name of the alias to retrieve. +// * @param options The options parameters. +// */ +// get( +// aliasName: string, +// options?: AliasesGetOptionalParams, +// ): Promise { +// return this.client.sendOperationRequest( +// { aliasName, options }, +// getOperationSpec, +// ); +// } +// } +// // Operation Specifications +// const serializer = coreClient.createSerializer(Mappers, /* isXml */ false); + +// const createOperationSpec: coreClient.OperationSpec = { +// path: "/aliases", +// httpMethod: "POST", +// responses: { +// 201: { +// bodyMapper: Mappers.SearchAlias, +// }, +// default: { +// bodyMapper: Mappers.ErrorResponse, +// }, +// }, +// requestBody: Parameters.alias, +// queryParameters: [Parameters.apiVersion], +// urlParameters: [Parameters.endpoint], +// headerParameters: [Parameters.contentType, Parameters.accept], +// mediaType: "json", +// serializer, +// }; +// const listOperationSpec: coreClient.OperationSpec = { +// path: "/aliases", +// httpMethod: "GET", +// responses: { +// 200: { +// bodyMapper: Mappers.ListAliasesResult, +// }, +// default: { +// bodyMapper: Mappers.ErrorResponse, +// }, +// }, +// queryParameters: [Parameters.apiVersion], +// urlParameters: [Parameters.endpoint], +// headerParameters: [Parameters.accept], +// serializer, +// }; +// const createOrUpdateOperationSpec: coreClient.OperationSpec = { +// path: "/aliases('{aliasName}')", +// httpMethod: "PUT", +// responses: { +// 200: { +// bodyMapper: Mappers.SearchAlias, +// }, +// 201: { +// bodyMapper: Mappers.SearchAlias, +// }, +// default: { +// bodyMapper: Mappers.ErrorResponse, +// }, +// }, +// requestBody: Parameters.alias, +// queryParameters: [Parameters.apiVersion], +// urlParameters: [Parameters.endpoint, Parameters.aliasName], +// headerParameters: [ +// Parameters.contentType, +// Parameters.accept, +// Parameters.ifMatch, +// Parameters.ifNoneMatch, +// Parameters.prefer, +// ], +// mediaType: "json", +// serializer, +// }; +// const 
deleteOperationSpec: coreClient.OperationSpec = { +// path: "/aliases('{aliasName}')", +// httpMethod: "DELETE", +// responses: { +// 204: {}, +// 404: {}, +// default: { +// bodyMapper: Mappers.ErrorResponse, +// }, +// }, +// queryParameters: [Parameters.apiVersion], +// urlParameters: [Parameters.endpoint, Parameters.aliasName], +// headerParameters: [ +// Parameters.accept, +// Parameters.ifMatch, +// Parameters.ifNoneMatch, +// ], +// serializer, +// }; +// const getOperationSpec: coreClient.OperationSpec = { +// path: "/aliases('{aliasName}')", +// httpMethod: "GET", +// responses: { +// 200: { +// bodyMapper: Mappers.SearchAlias, +// }, +// default: { +// bodyMapper: Mappers.ErrorResponse, +// }, +// }, +// queryParameters: [Parameters.apiVersion], +// urlParameters: [Parameters.endpoint, Parameters.aliasName], +// headerParameters: [Parameters.accept], +// serializer, +// }; diff --git a/sdk/search/search-documents/src/generated/service/operations/dataSources.ts b/sdk/search/search-documents/src/generated/service/operations/dataSources.ts index 71292ac4e3d3..b66f9cee7f8f 100644 --- a/sdk/search/search-documents/src/generated/service/operations/dataSources.ts +++ b/sdk/search/search-documents/src/generated/service/operations/dataSources.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -import { DataSources } from "../operationsInterfaces"; +import { DataSources } from "../operationsInterfaces/index.js"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers"; -import * as Parameters from "../models/parameters"; -import { SearchServiceClient } from "../searchServiceClient"; +import * as Mappers from "../models/mappers.js"; +import * as Parameters from "../models/parameters.js"; +import { SearchServiceClient } from "../searchServiceClient.js"; import { SearchIndexerDataSource, DataSourcesCreateOrUpdateOptionalParams, @@ -22,7 +22,7 @@ import { DataSourcesListResponse, DataSourcesCreateOptionalParams, DataSourcesCreateResponse, -} from "../models"; +} from "../models/index.js"; /** Class containing DataSources operations. */ export class DataSourcesImpl implements DataSources { diff --git a/sdk/search/search-documents/src/generated/service/operations/index.ts b/sdk/search/search-documents/src/generated/service/operations/index.ts index 896ae33eded4..ce88c8ccad72 100644 --- a/sdk/search/search-documents/src/generated/service/operations/index.ts +++ b/sdk/search/search-documents/src/generated/service/operations/index.ts @@ -6,8 +6,8 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -export * from "./dataSources"; -export * from "./indexers"; -export * from "./skillsets"; -export * from "./synonymMaps"; -export * from "./indexes"; +export * from "./dataSources.js"; +export * from "./indexers.js"; +export * from "./skillsets.js"; +export * from "./synonymMaps.js"; +export * from "./indexes.js"; diff --git a/sdk/search/search-documents/src/generated/service/operations/indexers.ts b/sdk/search/search-documents/src/generated/service/operations/indexers.ts index dbdb52a59026..839a9f61567b 100644 --- a/sdk/search/search-documents/src/generated/service/operations/indexers.ts +++ b/sdk/search/search-documents/src/generated/service/operations/indexers.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. 
*/ -import { Indexers } from "../operationsInterfaces"; +import { Indexers } from "../operationsInterfaces/index.js"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers"; -import * as Parameters from "../models/parameters"; -import { SearchServiceClient } from "../searchServiceClient"; +import * as Mappers from "../models/mappers.js"; +import * as Parameters from "../models/parameters.js"; +import { SearchServiceClient } from "../searchServiceClient.js"; import { IndexersResetOptionalParams, IndexersRunOptionalParams, @@ -26,7 +26,7 @@ import { IndexersCreateResponse, IndexersGetStatusOptionalParams, IndexersGetStatusResponse, -} from "../models"; +} from "../models/index.js"; /** Class containing Indexers operations. */ export class IndexersImpl implements Indexers { diff --git a/sdk/search/search-documents/src/generated/service/operations/indexes.ts b/sdk/search/search-documents/src/generated/service/operations/indexes.ts index c456c969db12..7487b922cf78 100644 --- a/sdk/search/search-documents/src/generated/service/operations/indexes.ts +++ b/sdk/search/search-documents/src/generated/service/operations/indexes.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -import { Indexes } from "../operationsInterfaces"; +import { Indexes } from "../operationsInterfaces/index.js"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers"; -import * as Parameters from "../models/parameters"; -import { SearchServiceClient } from "../searchServiceClient"; +import * as Mappers from "../models/mappers.js"; +import * as Parameters from "../models/parameters.js"; +import { SearchServiceClient } from "../searchServiceClient.js"; import { SearchIndex, IndexesCreateOptionalParams, @@ -27,7 +27,7 @@ import { AnalyzeRequest, IndexesAnalyzeOptionalParams, IndexesAnalyzeResponse, -} from "../models"; +} from "../models/index.js"; /** Class containing Indexes operations. */ export class IndexesImpl implements Indexes { diff --git a/sdk/search/search-documents/src/generated/service/operations/skillsets.ts b/sdk/search/search-documents/src/generated/service/operations/skillsets.ts index c9394176f298..be99880d64db 100644 --- a/sdk/search/search-documents/src/generated/service/operations/skillsets.ts +++ b/sdk/search/search-documents/src/generated/service/operations/skillsets.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -import { Skillsets } from "../operationsInterfaces"; +import { Skillsets } from "../operationsInterfaces/index.js"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers"; -import * as Parameters from "../models/parameters"; -import { SearchServiceClient } from "../searchServiceClient"; +import * as Mappers from "../models/mappers.js"; +import * as Parameters from "../models/parameters.js"; +import { SearchServiceClient } from "../searchServiceClient.js"; import { SearchIndexerSkillset, SkillsetsCreateOrUpdateOptionalParams, @@ -22,7 +22,7 @@ import { SkillsetsListResponse, SkillsetsCreateOptionalParams, SkillsetsCreateResponse, -} from "../models"; +} from "../models/index.js"; /** Class containing Skillsets operations. 
*/ export class SkillsetsImpl implements Skillsets { diff --git a/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts b/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts index afde7649c7d9..8bd281f6aeda 100644 --- a/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts +++ b/sdk/search/search-documents/src/generated/service/operations/synonymMaps.ts @@ -6,11 +6,11 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -import { SynonymMaps } from "../operationsInterfaces"; +import { SynonymMaps } from "../operationsInterfaces/index.js"; import * as coreClient from "@azure/core-client"; -import * as Mappers from "../models/mappers"; -import * as Parameters from "../models/parameters"; -import { SearchServiceClient } from "../searchServiceClient"; +import * as Mappers from "../models/mappers.js"; +import * as Parameters from "../models/parameters.js"; +import { SearchServiceClient } from "../searchServiceClient.js"; import { SynonymMap, SynonymMapsCreateOrUpdateOptionalParams, @@ -22,7 +22,7 @@ import { SynonymMapsListResponse, SynonymMapsCreateOptionalParams, SynonymMapsCreateResponse, -} from "../models"; +} from "../models/index.js"; /** Class containing SynonymMaps operations. */ export class SynonymMapsImpl implements SynonymMaps { diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts new file mode 100644 index 000000000000..e057ed768a45 --- /dev/null +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/aliases.ts @@ -0,0 +1,68 @@ +// /* +// * Copyright (c) Microsoft Corporation. +// * Licensed under the MIT License. +// * +// * Code generated by Microsoft (R) AutoRest Code Generator. +// * Changes may cause incorrect behavior and will be lost if the code is regenerated. +// */ + +// import { +// SearchAlias, +// AliasesCreateOptionalParams, +// AliasesCreateResponse, +// AliasesListOptionalParams, +// AliasesListResponse, +// AliasesCreateOrUpdateOptionalParams, +// AliasesCreateOrUpdateResponse, +// AliasesDeleteOptionalParams, +// AliasesGetOptionalParams, +// AliasesGetResponse, +// } from "../models/index.js"; + +// /** Interface representing a Aliases. */ +// export interface Aliases { +// /** +// * Creates a new search alias. +// * @param alias The definition of the alias to create. +// * @param options The options parameters. +// */ +// create( +// alias: SearchAlias, +// options?: AliasesCreateOptionalParams, +// ): Promise; +// /** +// * Lists all aliases available for a search service. +// * @param options The options parameters. +// */ +// list(options?: AliasesListOptionalParams): Promise; +// /** +// * Creates a new search alias or updates an alias if it already exists. +// * @param aliasName The definition of the alias to create or update. +// * @param alias The definition of the alias to create or update. +// * @param options The options parameters. +// */ +// createOrUpdate( +// aliasName: string, +// alias: SearchAlias, +// options?: AliasesCreateOrUpdateOptionalParams, +// ): Promise; +// /** +// * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no +// * recovery option. The mapped index is untouched by this operation. +// * @param aliasName The name of the alias to delete. +// * @param options The options parameters. 
+// */ +// delete( +// aliasName: string, +// options?: AliasesDeleteOptionalParams, +// ): Promise; +// /** +// * Retrieves an alias definition. +// * @param aliasName The name of the alias to retrieve. +// * @param options The options parameters. +// */ +// get( +// aliasName: string, +// options?: AliasesGetOptionalParams, +// ): Promise; +// } diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts index 801ff187e26a..36a165a3974f 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/dataSources.ts @@ -17,7 +17,7 @@ import { DataSourcesListResponse, DataSourcesCreateOptionalParams, DataSourcesCreateResponse, -} from "../models"; +} from "../models/index.js"; /** Interface representing a DataSources. */ export interface DataSources { diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts index 896ae33eded4..ce88c8ccad72 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/index.ts @@ -6,8 +6,8 @@ * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ -export * from "./dataSources"; -export * from "./indexers"; -export * from "./skillsets"; -export * from "./synonymMaps"; -export * from "./indexes"; +export * from "./dataSources.js"; +export * from "./indexers.js"; +export * from "./skillsets.js"; +export * from "./synonymMaps.js"; +export * from "./indexes.js"; diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts index 695f6815f47e..7a2501641a48 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexers.ts @@ -21,7 +21,7 @@ import { IndexersCreateResponse, IndexersGetStatusOptionalParams, IndexersGetStatusResponse, -} from "../models"; +} from "../models/index.js"; /** Interface representing a Indexers. */ export interface Indexers { diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts index dc88a3a325d4..97c64eb3214c 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/indexes.ts @@ -22,7 +22,7 @@ import { AnalyzeRequest, IndexesAnalyzeOptionalParams, IndexesAnalyzeResponse, -} from "../models"; +} from "../models/index.js"; /** Interface representing a Indexes. 
*/ export interface Indexes { diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts index 04ba69fa9b27..42378fafa8d1 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/skillsets.ts @@ -17,7 +17,7 @@ import { SkillsetsListResponse, SkillsetsCreateOptionalParams, SkillsetsCreateResponse, -} from "../models"; +} from "../models/index.js"; /** Interface representing a Skillsets. */ export interface Skillsets { diff --git a/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts b/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts index b26e83a49d74..12eefed6a043 100644 --- a/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts +++ b/sdk/search/search-documents/src/generated/service/operationsInterfaces/synonymMaps.ts @@ -17,7 +17,7 @@ import { SynonymMapsListResponse, SynonymMapsCreateOptionalParams, SynonymMapsCreateResponse, -} from "../models"; +} from "../models/index.js"; /** Interface representing a SynonymMaps. */ export interface SynonymMaps { diff --git a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts b/sdk/search/search-documents/src/generated/service/searchServiceClient.ts index c21faa69bd76..2c5f749bb603 100644 --- a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts +++ b/sdk/search/search-documents/src/generated/service/searchServiceClient.ts @@ -19,22 +19,22 @@ import { SkillsetsImpl, SynonymMapsImpl, IndexesImpl, -} from "./operations"; +} from "./operations/index.js"; import { DataSources, Indexers, Skillsets, SynonymMaps, Indexes, -} from "./operationsInterfaces"; -import * as Parameters from "./models/parameters"; -import * as Mappers from "./models/mappers"; +} from "./operationsInterfaces/index.js"; +import * as Parameters from "./models/parameters.js"; +import * as Mappers from "./models/mappers.js"; import { ApiVersion20240701, SearchServiceClientOptionalParams, GetServiceStatisticsOptionalParams, GetServiceStatisticsResponse, -} from "./models"; +} from "./models/index.js"; /** @internal */ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { @@ -67,7 +67,7 @@ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { requestContentType: "application/json; charset=utf-8", }; - const packageDetails = `azsdk-js-search-documents/12.1.0`; + const packageDetails = `azsdk-js-search-documents/12.2.0-beta.2`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix ? 
`${options.userAgentOptions.userAgentPrefix} ${packageDetails}` diff --git a/sdk/search/search-documents/swagger/Data.md b/sdk/search/search-documents/swagger/Data.md index eb033c447e37..ef404c2ed595 100644 --- a/sdk/search/search-documents/swagger/Data.md +++ b/sdk/search/search-documents/swagger/Data.md @@ -10,7 +10,7 @@ generate-metadata: false license-header: MICROSOFT_MIT_NO_VERSION output-folder: ../ source-code-folder-path: ./src/generated/data -input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/14531a7cf6101c1dd57e7c1c83103a047bb8f5bb/specification/search/data-plane/Azure.Search/preview/2024-11-01-preview/searchindex.json +input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/dc27f9b32787533cd4d07fe0de5245f2f8354dbe/specification/search/data-plane/Azure.Search/stable/2024-07-01/searchindex.json add-credentials: false title: SearchClient use-extension: @@ -167,7 +167,7 @@ directive: $["$ref"] = "#/definitions/HybridSearch"; ``` -### Fix `SearchResult["@search.documentDebugInfo"]` + diff --git a/sdk/search/search-documents/swagger/Service.md b/sdk/search/search-documents/swagger/Service.md index 7ecc45be5da6..7268baf7432e 100644 --- a/sdk/search/search-documents/swagger/Service.md +++ b/sdk/search/search-documents/swagger/Service.md @@ -10,7 +10,7 @@ generate-metadata: false license-header: MICROSOFT_MIT_NO_VERSION output-folder: ../ source-code-folder-path: ./src/generated/service -input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/14531a7cf6101c1dd57e7c1c83103a047bb8f5bb/specification/search/data-plane/Azure.Search/preview/2024-11-01-preview/searchservice.json +input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/dc27f9b32787533cd4d07fe0de5245f2f8354dbe/specification/search/data-plane/Azure.Search/stable/2024-07-01/searchservice.json add-credentials: false use-extension: "@autorest/typescript": "6.0.34" diff --git a/sdk/search/search-documents/test/README.md b/sdk/search/search-documents/test/README.md index b9d063c8d05e..ea9c3f96c4a9 100644 --- a/sdk/search/search-documents/test/README.md +++ b/sdk/search/search-documents/test/README.md @@ -2,19 +2,15 @@ To test this project, make sure to build it by following our [building instructions](https://github.com/Azure/azure-sdk-for-js/blob/main/CONTRIBUTING.md#building), then follow the [testing instructions](https://github.com/Azure/azure-sdk-for-js/blob/main/CONTRIBUTING.md#testing). -The Azure Cognitive Search client does not have any recorded tests and so, all the tests require an Azure Cognitive Search account to be set up beforehand. You can use existing Azure resources for the live tests, or generate new ones by using our [New-TestResources.ps1](https://github.com/Azure/azure-sdk-for-js/blob/main/eng/common/TestResources/New-TestResources.ps1) script, which will use an [ARM template](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/search/test-resources.json) that already has all of the the necessary configurations. +The Azure Cognitive Search client does not have any recorded tests and so, all the tests require an Azure Cognitive Search account to be set up beforehand. 
You can use existing Azure resources for the live tests, or generate new ones by using our [New-TestResources.ps1](https://github.com/Azure/azure-sdk-for-js/blob/main/eng/common/TestResources/New-TestResources.ps1) script, which will use a [Bicep template](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/search/test-resources.bicep) that already has all of the necessary configurations. The Azure resource that is used by the tests in this project is: -- An [Azure Cognitive Search](https://docs.microsoft.com/azure/search/search-what-is-azure-search) account. +- An [Azure Cognitive Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) account. To run the live tests, you will also need to set the below environment variables: - `TEST_MODE`: Should have `live` assigned. -- `SEARCH_API_ADMIN_KEY`: The primary key of your Azure Search account. -- `SEARCH_API_ADMIN_KEY_ALT` (optional): The secondary key of your Azure Search account. - `ENDPOINT`: The endpoint of your Azure Search account. The live tests in this project will create, populate and search over search indexes inside of the provided Azure Cognitive Search account. - -![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-js%2Fsdk%2Fsearch%2Fsearch-documents%2Ftest%2FREADME.png) diff --git a/sdk/search/search-documents/test/internal/base64.spec.ts b/sdk/search/search-documents/test/internal/base64.spec.ts index a5021d7ed23a..252a27b289c0 100644 --- a/sdk/search/search-documents/test/internal/base64.spec.ts +++ b/sdk/search/search-documents/test/internal/base64.spec.ts @@ -1,11 +1,10 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. +import { decode, encode } from "../../src/base64.js"; +import { describe, it, assert } from "vitest"; -import { assert } from "chai"; -import { decode, encode } from "../../src/base64"; - -describe("base64", function () { - it("strings can roundtrip", function () { +describe("base64", () => { + it("strings can roundtrip", () => { const message = "Only *you* can prevent null dereferences!"; const encoded = encode(message); const decoded = decode(encoded); diff --git a/sdk/search/search-documents/test/internal/browser/synonymMap.browser.spec.ts b/sdk/search/search-documents/test/internal/browser/synonymMap.browser.spec.ts index 24581907eec8..3564ba4da121 100644 --- a/sdk/search/search-documents/test/internal/browser/synonymMap.browser.spec.ts +++ b/sdk/search/search-documents/test/internal/browser/synonymMap.browser.spec.ts @@ -1,11 +1,10 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License.
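Note on the environment variables listed in the README hunk above: as a minimal sketch of how a test helper might consume them, assuming only the documented `TEST_MODE` and `ENDPOINT` variables (the helper name `requireTestEndpoint` is hypothetical and not part of this PR; `assertEnvironmentVariable` is the same `@azure-tools/test-recorder` utility the suite already imports):

```ts
import { assertEnvironmentVariable } from "@azure-tools/test-recorder";

// Hypothetical helper: fail fast with a clear error when the live-test
// configuration is incomplete, rather than deep inside a client call.
// assertEnvironmentVariable throws if the named variable is unset.
function requireTestEndpoint(): string {
  return assertEnvironmentVariable("ENDPOINT");
}

// TEST_MODE=live is consumed by the test recorder itself; the suite only
// needs the endpoint to construct its clients.
const endpoint = requireTestEndpoint();
console.log(`Running live tests against ${endpoint}`);
```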
+import { createSynonymMapFromFile } from "../../../src/synonymMapHelper.js"; +import { describe, it, assert } from "vitest"; -import { assert } from "chai"; -import { createSynonymMapFromFile } from "../../../src/synonymMapHelper.browser"; - -describe("synonymmap", function () { - it("create synonymmap from file(browser)", async function () { +describe("synonymmap", () => { + it("create synonymmap from file(browser)", async () => { let errorThrown = false; try { await createSynonymMapFromFile("my-synonym-map-1", "./test/internal/synonymMap.txt"); diff --git a/sdk/search/search-documents/test/internal/geographyPoint.spec.ts b/sdk/search/search-documents/test/internal/geographyPoint.spec.ts index 7c194b88902c..eee95607cf3f 100644 --- a/sdk/search/search-documents/test/internal/geographyPoint.spec.ts +++ b/sdk/search/search-documents/test/internal/geographyPoint.spec.ts @@ -1,12 +1,11 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. -import { assert } from "chai"; -import * as sinon from "sinon"; -import GeographyPoint from "../../src/geographyPoint"; +import GeographyPoint from "../../src/geographyPoint.js"; +import { describe, it, assert } from "vitest"; -describe("geographyPoint", function () { - it("JSON.stringify", function () { +describe("geographyPoint", () => { + it("JSON.stringify", () => { const geoPoint = new GeographyPoint({ longitude: -122.123889, latitude: 47.669444, @@ -18,8 +17,4 @@ describe("geographyPoint", function () { crs: { type: "name", properties: { name: "EPSG:4326" } }, }); }); - - afterEach(function () { - sinon.restore(); - }); }); diff --git a/sdk/search/search-documents/test/internal/node/synonymMap.node.spec.ts b/sdk/search/search-documents/test/internal/node/synonymMap.node.spec.ts index ddcefd4c9afe..d0fcfc404b34 100644 --- a/sdk/search/search-documents/test/internal/node/synonymMap.node.spec.ts +++ b/sdk/search/search-documents/test/internal/node/synonymMap.node.spec.ts @@ -1,9 +1,8 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { assert } from "chai"; -import { createSynonymMapFromFile } from "../../../src"; -import { SynonymMap } from "../../../src/serviceModels"; +// Licensed under the MIT License. +import { createSynonymMapFromFile } from "../../../src/index.js"; +import type { SynonymMap } from "../../../src/serviceModels.js"; +import { describe, it, assert } from "vitest"; describe("synonymmap", function () { it("create synonymmap from file(node)", async function () { diff --git a/sdk/search/search-documents/test/internal/serialization.spec.ts b/sdk/search/search-documents/test/internal/serialization.spec.ts index 44e0aa9d9e7f..092c727c75c9 100644 --- a/sdk/search/search-documents/test/internal/serialization.spec.ts +++ b/sdk/search/search-documents/test/internal/serialization.spec.ts @@ -1,26 +1,25 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. 
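Note on the base64 roundtrip test above: `src/base64.ts` itself is not part of this hunk, but a Node-flavored encode/decode pair satisfying `decode(encode(message)) === message` could look like the following; this is a sketch, not the package's actual source, which may differ.

```ts
// Sketch only: UTF-8 <-> base64 via Node's Buffer. Encoding then decoding
// any string returns the original, which is exactly what the test asserts.
export function encode(value: string): string {
  return Buffer.from(value, "utf8").toString("base64");
}

export function decode(value: string): string {
  return Buffer.from(value, "base64").toString("utf8");
}
```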
-import { assert } from "chai"; -import * as sinon from "sinon"; -import GeographyPoint from "../../src/geographyPoint"; -import { deserialize, serialize } from "../../src/serialization"; +import GeographyPoint from "../../src/geographyPoint.js"; +import { deserialize, serialize } from "../../src/serialization.js"; +import { describe, it, assert } from "vitest"; -describe("serialization.serialize", function () { - it("nested", function () { +describe("serialization.serialize", () => { + it("nested", () => { const nestedInput = { a: { b: { c: { d: [42] } } } }; const result = serialize(nestedInput); assert.deepEqual(nestedInput, result); }); - it("circular", function () { + it("circular", () => { const circularInput: any = { a: null }; circularInput.a = circularInput; const result = serialize(circularInput); assert.deepEqual(circularInput, result); }); - it("recursive 1", function () { + it("recursive 1", () => { const child = { hello: "world" }; const documents = [ { id: "1", children: [child] }, @@ -30,7 +29,7 @@ describe("serialization.serialize", function () { assert.deepEqual(documents, result); }); - it("recursive 2", function () { + it("recursive 2", () => { const child = { hello: Infinity, world: -Infinity, universe: NaN }; const expectChild = { hello: "INF", world: "-INF", universe: "NaN" }; const documents = [ @@ -44,22 +43,22 @@ describe("serialization.serialize", function () { assert.deepEqual(result, expect); }); - it("NaN", function () { + it("NaN", () => { const result = serialize({ a: NaN }); assert.deepEqual(result, { a: "NaN" }); }); - it("Infinity", function () { + it("Infinity", () => { const result = serialize({ a: Infinity }); assert.deepEqual(result, { a: "INF" }); }); - it("Negative Infinity", function () { + it("Negative Infinity", () => { const result = serialize({ a: -Infinity }); assert.deepEqual(result, { a: "-INF" }); }); - it("GeographyPoint", function () { + it("GeographyPoint", () => { const result = serialize({ location: new GeographyPoint({ latitude: 37.989769, longitude: -84.527771 }), }); @@ -72,27 +71,23 @@ describe("serialization.serialize", function () { }; assert.deepEqual(result, expect); }); - - afterEach(function () { - sinon.restore(); - }); }); -describe("serialization.deserialize", function () { - it("nested", function () { +describe("serialization.deserialize", () => { + it("nested", () => { const nestedInput = { a: { b: { c: { d: [42] } } } }; const result = deserialize(nestedInput); assert.deepEqual(nestedInput, result); }); - it("circular", function () { + it("circular", () => { const circularInput: any = { a: null }; circularInput.a = circularInput; const result = deserialize(circularInput); assert.deepEqual(circularInput, result); }); - it("recursive 1", function () { + it("recursive 1", () => { const child = { hello: "world" }; const documents = [ { id: "1", children: [child] }, @@ -102,7 +97,7 @@ describe("serialization.deserialize", function () { assert.deepEqual(documents, result); }); - it("recursive 2", function () { + it("recursive 2", () => { const child = { hello: "INF", world: "-INF", universe: "NaN" }; const expectChild = { hello: Infinity, world: -Infinity, universe: NaN }; const documents = [ @@ -116,50 +111,50 @@ describe("serialization.deserialize", function () { assert.deepEqual(result, expect); }); - it("NaN", function () { + it("NaN", () => { const result = deserialize({ a: "NaN" }); assert.deepEqual(result, { a: NaN }); }); - it("Infinity", function () { + it("Infinity", () => { const result = deserialize({ a: "INF" }); 
assert.deepEqual(result, { a: Infinity }); }); - it("Negative Infinity", function () { + it("Negative Infinity", () => { const result = deserialize({ a: "-INF" }); assert.deepEqual(result, { a: -Infinity }); }); - it("Date", function () { + it("Date", () => { const result = deserialize({ a: "1975-04-04T00:00:00.000Z" }); assert.deepEqual(result, { a: new Date(Date.UTC(1975, 3, 4)) }); }); - it("Date with truncated ms field", function () { + it("Date with truncated ms field", () => { const result = deserialize({ a: "1975-04-04T00:00:00.0Z" }); assert.deepEqual(result, { a: new Date(Date.UTC(1975, 3, 4)) }); }); - it("doesn't deserialize as Date if text before", function () { + it("doesn't deserialize as Date if text before", () => { const value = "before 1975-04-04T00:00:00.000Z"; const result = deserialize({ a: value }); assert.deepEqual(result, { a: value }); }); - it("doesn't deserialize as Date if text after", function () { + it("doesn't deserialize as Date if text after", () => { const value = "1975-04-04T00:00:00.000Z after"; const result = deserialize({ a: value }); assert.deepEqual(result, { a: value }); }); - it("doesn't deserialize as Date if text before and after", function () { + it("doesn't deserialize as Date if text before and after", () => { const value = "before 1975-04-04T00:00:00.000Z after"; const result = deserialize({ a: value }); assert.deepEqual(result, { a: value }); }); - it("GeographyPoint", function () { + it("GeographyPoint", () => { const result: { location: GeographyPoint } = deserialize({ location: { type: "Point", @@ -171,8 +166,4 @@ describe("serialization.deserialize", function () { assert.equal(result.location.latitude, 37.989769); assert.equal(result.location.longitude, -84.527771); }); - - afterEach(function () { - sinon.restore(); - }); }); diff --git a/sdk/search/search-documents/test/internal/serviceUtils.spec.ts b/sdk/search/search-documents/test/internal/serviceUtils.spec.ts index 64e205a37ffc..6bab6cd37d84 100644 --- a/sdk/search/search-documents/test/internal/serviceUtils.spec.ts +++ b/sdk/search/search-documents/test/internal/serviceUtils.spec.ts @@ -1,14 +1,13 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. 
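Note on the serialize/deserialize specs above: they pin the wire mapping for non-finite numbers (`NaN`/`INF`/`-INF`), ISO-8601 dates, and GeoJSON points. A minimal sketch of the scalar rule those assertions imply, under the assumption that the real implementation applies it while walking nested objects (which the `nested`, `recursive`, and `circular` tests cover):

```ts
// Outbound: non-finite numbers become the OData EDM sentinel strings.
function serializeNumber(value: number): number | string {
  if (Number.isNaN(value)) return "NaN";
  if (value === Infinity) return "INF";
  if (value === -Infinity) return "-INF";
  return value;
}

// Inbound: the sentinel strings are revived into their numeric values.
function deserializeNumber(value: string | number): string | number {
  switch (value) {
    case "NaN":
      return NaN;
    case "INF":
      return Infinity;
    case "-INF":
      return -Infinity;
    default:
      return value;
  }
}
```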
+import type { SearchField as GeneratedSearchField } from "../../src/generated/service/models/index.js"; +import { KnownAnalyzerNames } from "../../src/index.js"; +import type { ComplexField, SearchField } from "../../src/serviceModels.js"; +import { convertFieldsToGenerated, convertFieldsToPublic } from "../../src/serviceUtils.js"; +import { describe, it, assert } from "vitest"; -import { assert } from "chai"; -import { SearchField as GeneratedSearchField } from "../../src/generated/service/models/index"; -import { KnownAnalyzerNames } from "../../src/index"; -import { ComplexField, SearchField } from "../../src/serviceModels"; -import { convertFieldsToGenerated, convertFieldsToPublic } from "../../src/serviceUtils"; - -describe("serviceUtils", function () { - it("convert generated fields to public fields", function () { +describe("serviceUtils", () => { + it("convert generated fields to public fields", () => { const publicFields: SearchField[] = convertFieldsToPublic([ { name: "id", @@ -21,6 +20,7 @@ describe("serviceUtils", function () { retrievable: false, analyzer: KnownAnalyzerNames.ArMicrosoft, indexAnalyzer: KnownAnalyzerNames.ArLucene, + normalizer: KnownAnalyzerNames.BgLucene, searchAnalyzer: KnownAnalyzerNames.CaLucene, synonymMaps: undefined, }, @@ -37,12 +37,13 @@ describe("serviceUtils", function () { hidden: true, analyzerName: KnownAnalyzerNames.ArMicrosoft, indexAnalyzerName: KnownAnalyzerNames.ArLucene, + normalizerName: KnownAnalyzerNames.BgLucene, searchAnalyzerName: KnownAnalyzerNames.CaLucene, synonymMapNames: undefined, }); }); - it("convert generated fields (complex) to public fields", function () { + it("convert generated fields (complex) to public fields", () => { const publicFields: SearchField[] = convertFieldsToPublic([ { name: "ComplexObj", @@ -59,6 +60,7 @@ describe("serviceUtils", function () { retrievable: false, analyzer: KnownAnalyzerNames.ArMicrosoft, indexAnalyzer: KnownAnalyzerNames.ArLucene, + normalizer: KnownAnalyzerNames.BgLucene, searchAnalyzer: KnownAnalyzerNames.CaLucene, synonymMaps: undefined, }, @@ -82,13 +84,14 @@ describe("serviceUtils", function () { hidden: true, analyzerName: KnownAnalyzerNames.ArMicrosoft, indexAnalyzerName: KnownAnalyzerNames.ArLucene, + normalizerName: KnownAnalyzerNames.BgLucene, searchAnalyzerName: KnownAnalyzerNames.CaLucene, synonymMapNames: undefined, }); }); - it("convert public fields to generated fields", function () { - const generatedFields: GeneratedSearchField[] = convertFieldsToGenerated([ + it("convert public fields to generated fields", () => { + const generatedFields: GeneratedSearchField[] | undefined = convertFieldsToGenerated([ { name: "id", key: true, @@ -100,12 +103,13 @@ describe("serviceUtils", function () { hidden: true, analyzerName: KnownAnalyzerNames.ArMicrosoft, indexAnalyzerName: KnownAnalyzerNames.ArLucene, + normalizerName: KnownAnalyzerNames.BgLucene, searchAnalyzerName: KnownAnalyzerNames.CaLucene, synonymMapNames: undefined, }, ]); - assert.include(generatedFields[0], { + assert.include(generatedFields?.[0], { name: "id", key: true, type: "Edm.String", @@ -116,13 +120,14 @@ describe("serviceUtils", function () { retrievable: false, analyzer: KnownAnalyzerNames.ArMicrosoft, indexAnalyzer: KnownAnalyzerNames.ArLucene, + normalizer: KnownAnalyzerNames.BgLucene, searchAnalyzer: KnownAnalyzerNames.CaLucene, synonymMaps: undefined, }); }); - it("convert public fields (complex) to generated fields", function () { - const generatedFields: GeneratedSearchField[] = convertFieldsToGenerated([ + 
it("convert public fields (complex) to generated fields", () => { + const generatedFields: GeneratedSearchField[] | undefined = convertFieldsToGenerated([ { name: "ComplexObj", type: "Edm.ComplexType", @@ -138,6 +143,7 @@ describe("serviceUtils", function () { hidden: true, analyzerName: KnownAnalyzerNames.ArMicrosoft, indexAnalyzerName: KnownAnalyzerNames.ArLucene, + normalizerName: KnownAnalyzerNames.BgLucene, searchAnalyzerName: KnownAnalyzerNames.CaLucene, synonymMapNames: undefined, }, @@ -145,12 +151,12 @@ describe("serviceUtils", function () { }, ]); - assert.include(generatedFields[0], { + assert.include(generatedFields?.[0], { name: "ComplexObj", type: "Edm.ComplexType", }); - assert.include(generatedFields[0].fields![0], { + assert.include(generatedFields?.[0].fields![0], { name: "id", key: true, type: "Edm.String", @@ -161,6 +167,7 @@ describe("serviceUtils", function () { retrievable: false, analyzer: KnownAnalyzerNames.ArMicrosoft, indexAnalyzer: KnownAnalyzerNames.ArLucene, + normalizer: KnownAnalyzerNames.BgLucene, searchAnalyzer: KnownAnalyzerNames.CaLucene, synonymMaps: undefined, }); diff --git a/sdk/search/search-documents/test/narrowedTypes.ts b/sdk/search/search-documents/test/narrowedTypes.ts index 47f2e86a5db2..63400f1a79a4 100644 --- a/sdk/search/search-documents/test/narrowedTypes.ts +++ b/sdk/search/search-documents/test/narrowedTypes.ts @@ -1,20 +1,19 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. /* eslint-disable no-unused-expressions */ -/* eslint-disable no-constant-condition */ /* eslint-disable @typescript-eslint/ban-ts-comment */ /* eslint-disable @typescript-eslint/explicit-function-return-type */ -/* eslint-disable @typescript-eslint/no-unused-vars */ -import { SearchClient, SelectFields } from "../src/index"; -import { +import type { SelectFields } from "../src/index.js"; +import { SearchClient } from "../src/index.js"; +import type { NarrowedModel as GenericNarrowedModel, SearchFieldArray, SearchPick, SelectArray, SuggestNarrowedModel, -} from "../src/indexModels"; +} from "../src/indexModels.js"; type Equals = (() => T extends T1 ? true : false) extends () => T extends T2 ? true : false ? 
any : never; @@ -56,8 +55,9 @@ function testSelectFields() { const a: Equals, string> = "pass"; const b: Equals, string> = "pass"; const c: Equals, string> = "pass"; + // Should pass as the fields are narrowed to the model fields + // @ts-expect-error const d: Equals, ModelFields> = "pass"; - // SelectFields should be an error, as unknown should be cast // @ts-expect-error const e: Equals, string> = "fail"; @@ -210,6 +210,8 @@ function testSelectArray() { // @ts-expect-error function testSearchFieldArray() { const a: Equals, readonly string[]> = "pass"; + // Should pass as the fields are narrowed to the model fields + // @ts-expect-error const b: Equals, readonly ModelFields[]> = "pass"; const c: Equals, readonly string[]> = "pass"; const d: Equals, readonly string[]> = "pass"; @@ -251,6 +253,8 @@ function testNarrowedClient() { >["queries"] >[number]["fields"] >; + // Should pass as the fields are narrowed to the model fields + // @ts-expect-error const a: Equals = "pass"; return a; }; diff --git a/sdk/search/search-documents/test/public/node/searchClient.spec.ts b/sdk/search/search-documents/test/public/node/searchClient.spec.ts index 70ce9aa4c9eb..718ab5cb247a 100644 --- a/sdk/search/search-documents/test/public/node/searchClient.spec.ts +++ b/sdk/search/search-documents/test/public/node/searchClient.spec.ts @@ -1,31 +1,32 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { env, isLiveMode, Recorder } from "@azure-tools/test-recorder"; -import { assert } from "chai"; -import { Context, Suite } from "mocha"; - import { delay } from "@azure/core-util"; -import { OpenAIClient } from "@azure/openai"; -import { +import type { OpenAIClient } from "@azure/openai"; +import type { AutocompleteResult, - AzureKeyCredential, - IndexDocumentsBatch, - SearchClient, + SearchFieldArray, SearchIndex, SearchIndexClient, + SelectArray, SelectFields, -} from "../../../src"; -import { SearchFieldArray, SelectArray } from "../../../src/indexModels"; -import { defaultServiceVersion } from "../../../src/serviceUtils"; -import { Hotel } from "../utils/interfaces"; -import { createClients } from "../utils/recordedClient"; -import { createIndex, createRandomIndexName, populateIndex, WAIT_TIME } from "../utils/setup"; - -describe("SearchClient", function (this: Suite) { - this.timeout(20_000); - - describe("constructor", function () { +} from "../../../src/index.js"; +import { + AzureKeyCredential, + IndexDocumentsBatch, + KnownQueryLanguage, + KnownQuerySpeller, + SearchClient, +} from "../../../src/index.js"; +import { defaultServiceVersion } from "../../../src/serviceUtils.js"; +import type { Hotel } from "../utils/interfaces.js"; +import { createClients } from "../utils/recordedClient.js"; +import { createIndex, createRandomIndexName, populateIndex, WAIT_TIME } from "../utils/setup.js"; +import { describe, it, assert, beforeEach, afterEach } from "vitest"; + +describe("SearchClient", { timeout: 20_000 }, () => { + describe("constructor", () => { const credential = new AzureKeyCredential("key"); describe("Passing serviceVersion", () => { @@ -63,7 +64,8 @@ describe("SearchClient", function (this: Suite) { }); }); - describe("stable", function () { + // TODO: the preview-only tests are mixed in here when they should be in another describe (and removed in the stable release branch) + describe("stable", { skip: true }, () => { let recorder: Recorder; let searchClient: SearchClient; let indexClient: SearchIndexClient; @@ -71,8 +73,8 @@ 
describe("SearchClient", function (this: Suite) { let TEST_INDEX_NAME: string; let indexDefinition: SearchIndex; - beforeEach(async function (this: Context) { - recorder = new Recorder(this.currentTest); + beforeEach(async (ctx) => { + recorder = new Recorder(ctx); TEST_INDEX_NAME = createRandomIndexName(); ({ searchClient, @@ -85,24 +87,123 @@ describe("SearchClient", function (this: Suite) { await populateIndex(searchClient, openAIClient); }); - afterEach(async function () { + afterEach(async () => { await indexClient.deleteIndex(TEST_INDEX_NAME); await delay(WAIT_TIME); await recorder?.stop(); }); - it("count returns the correct document count", async function () { + const baseSemanticOptions = () => + ({ + queryLanguage: KnownQueryLanguage.EnUs, + queryType: "semantic", + semanticSearchOptions: { + configurationName: + indexDefinition.semanticSearch?.configurations?.[0].name ?? + assert.fail("No semantic configuration in index."), + }, + }) as const; + + it("search with speller", async () => { + const searchResults = await searchClient.search("budjet", { + skip: 0, + top: 5, + includeTotalCount: true, + queryLanguage: KnownQueryLanguage.EnUs, + speller: KnownQuerySpeller.Lexicon, + }); + assert.equal(searchResults.count, 6); + }); + + it("search with semantic ranking", async () => { + const searchResults = await searchClient.search("luxury", { + ...baseSemanticOptions(), + skip: 0, + top: 5, + includeTotalCount: true, + }); + assert.equal(searchResults.count, 1); + }); + + it("search with document debug info", async () => { + const baseOptions = baseSemanticOptions(); + const options = { + ...baseOptions, + semanticSearchOptions: { + ...baseOptions.semanticSearchOptions, + errorMode: "fail", + debugMode: "semantic", + }, + } as const; + const searchResults = await searchClient.search("luxury", options); + for await (const result of searchResults.results) { + assert.deepEqual( + { + semantic: { + contentFields: [ + { + name: "description", + state: "used", + }, + ], + keywordFields: [ + { + name: "tags", + state: "used", + }, + ], + rerankerInput: { + content: + "Best hotel in town if you like luxury hotels. They have an amazing infinity pool, a spa, and a really helpful concierge. The location is perfect -- right downtown, close to all the tourist attractions. 
We highly recommend this hotel.", + keywords: "pool\r\nview\r\nwifi\r\nconcierge", + title: "Fancy Stay", + }, + titleField: { + name: "hotelName", + state: "used", + }, + }, + }, + result.documentDebugInfo, + ); + } + }); + + it("search with answers", async () => { + const baseOptions = baseSemanticOptions(); + const options = { + ...baseOptions, + semanticSearchOptions: { + ...baseOptions.semanticSearchOptions, + answers: { answerType: "extractive", count: 3, threshold: 0.7 }, + }, + top: 3, + select: ["hotelId"], + } as const; + const searchResults = await searchClient.search( + "What are the most luxurious hotels?", + options, + ); + + const resultIds = []; + for await (const result of searchResults.results) { + resultIds.push(result.document.hotelId); + } + assert.deepEqual(["1", "9", "3"], resultIds); + }); + + it("count returns the correct document count", async () => { const documentCount = await searchClient.getDocumentsCount(); assert.equal(documentCount, 10); }); - it("autocomplete returns the correct autocomplete result", async function () { + it("autocomplete returns the correct autocomplete result", async () => { const autoCompleteResult: AutocompleteResult = await searchClient.autocomplete("sec", "sg"); assert.equal(autoCompleteResult.results.length, 1); assert.equal(autoCompleteResult.results[0].text, "secret"); }); - it("autocomplete returns zero results for invalid query", async function () { + it("autocomplete returns zero results for invalid query", async () => { const autoCompleteResult: AutocompleteResult = await searchClient.autocomplete( "garbxyz", "sg", @@ -110,7 +211,7 @@ describe("SearchClient", function (this: Suite) { assert.isTrue(autoCompleteResult.results.length === 0); }); - it("search returns the correct search result", async function () { + it("search returns the correct search result", async () => { const searchResults = await searchClient.search("budget", { skip: 0, top: 5, @@ -120,9 +221,10 @@ describe("SearchClient", function (this: Suite) { assert.equal(searchResults.count, 6); }); - it("search narrows the result type", async function () { - // eslint-disable-next-line no-constant-condition - if (false) { + it("search narrows the result type", async () => { + // This part of the test is only for types. This doesn't need to be called. 
+ // eslint-disable-next-line no-unused-expressions + async () => { const response = await searchClient.search("asdf", { select: ["address/city"], }); @@ -131,7 +233,7 @@ describe("SearchClient", function (this: Suite) { // @ts-expect-error result.document.category = ""; } - } + }; const hotelKeys: (keyof Hotel)[] = [ "address", @@ -236,7 +338,7 @@ describe("SearchClient", function (this: Suite) { await Promise.all(searchFieldsTestPromises); }); - it("search returns zero results for invalid query", async function () { + it("search returns zero results for invalid query", async () => { const searchResults = await searchClient.search("garbxyz", { skip: 0, top: 5, @@ -245,7 +347,7 @@ describe("SearchClient", function (this: Suite) { assert.equal(searchResults.count, 0); }); - it("suggest returns the correct suggestions", async function () { + it("suggest returns the correct suggestions", async () => { const suggestResult = await searchClient.suggest("WiFi", "sg"); assert.equal(suggestResult.results.length, 1); assert.isTrue( @@ -253,12 +355,12 @@ describe("SearchClient", function (this: Suite) { ); }); - it("suggest returns zero suggestions for invalid input", async function () { + it("suggest returns zero suggestions for invalid input", async () => { const suggestResult = await searchClient.suggest("garbxyz", "sg"); assert.equal(suggestResult.results.length, 0); }); - it("getDocument returns the correct document result", async function () { + it("getDocument returns the correct document result", async () => { const getDocumentResult = await searchClient.getDocument("8"); assert.equal( getDocumentResult.description, @@ -271,7 +373,7 @@ describe("SearchClient", function (this: Suite) { assert.equal(getDocumentResult.hotelId, "8"); }); - it("getDocument throws error for invalid getDocument Value", async function () { + it("getDocument throws error for invalid getDocument Value", async () => { let errorThrown = false; try { await searchClient.getDocument("garbxyz"); @@ -281,7 +383,7 @@ describe("SearchClient", function (this: Suite) { assert.isTrue(errorThrown, "Expected getDocument to fail with an exception"); }); - it("deleteDocuments delete a document by documents", async function () { + it("deleteDocuments delete a document by documents", async () => { const getDocumentResult = await searchClient.getDocument("8"); await searchClient.deleteDocuments([getDocumentResult]); await delay(WAIT_TIME); @@ -289,14 +391,14 @@ describe("SearchClient", function (this: Suite) { assert.equal(documentCount, 9); }); - it("deleteDocuments delete a document by key/keyNames", async function () { + it("deleteDocuments delete a document by key/keyNames", async () => { await searchClient.deleteDocuments("hotelId", ["9", "10"]); await delay(WAIT_TIME); const documentCount = await searchClient.getDocumentsCount(); assert.equal(documentCount, 8); }); - it("mergeOrUploadDocuments modify & merge an existing document", async function () { + it("mergeOrUploadDocuments modify & merge an existing document", async () => { let getDocumentResult = await searchClient.getDocument("6"); getDocumentResult.description = "Modified Description"; await searchClient.mergeOrUploadDocuments([getDocumentResult]); @@ -305,7 +407,7 @@ describe("SearchClient", function (this: Suite) { assert.equal(getDocumentResult.description, "Modified Description"); }); - it("mergeOrUploadDocuments merge a new document", async function () { + it("mergeOrUploadDocuments merge a new document", async () => { const document = { hotelId: "11", description: 
"New Hotel Description", @@ -317,7 +419,7 @@ describe("SearchClient", function (this: Suite) { assert.equal(documentCount, 11); }); - it("mergeDocuments modify & merge an existing document", async function () { + it("mergeDocuments modify & merge an existing document", async () => { let getDocumentResult = await searchClient.getDocument("6"); getDocumentResult.description = "Modified Description"; await searchClient.mergeDocuments([getDocumentResult]); @@ -326,7 +428,7 @@ describe("SearchClient", function (this: Suite) { assert.equal(getDocumentResult.description, "Modified Description"); }); - it("uploadDocuments upload a set of documents", async function () { + it("uploadDocuments upload a set of documents", async () => { const documents = [ { hotelId: "11", @@ -345,7 +447,7 @@ describe("SearchClient", function (this: Suite) { assert.equal(documentCount, 12); }); - it("indexDocuments upload a new document", async function () { + it("indexDocuments upload a new document", async () => { const batch: IndexDocumentsBatch = new IndexDocumentsBatch(); batch.upload([ { @@ -360,7 +462,7 @@ describe("SearchClient", function (this: Suite) { assert.equal(documentCount, 11); }); - it("indexDocuments deletes existing documents", async function () { + it("indexDocuments deletes existing documents", async () => { const batch: IndexDocumentsBatch = new IndexDocumentsBatch(); batch.delete([ { @@ -377,7 +479,7 @@ describe("SearchClient", function (this: Suite) { assert.equal(documentCount, 8); }); - it("indexDocuments merges an existing document", async function () { + it("indexDocuments merges an existing document", async () => { const batch: IndexDocumentsBatch = new IndexDocumentsBatch(); batch.merge([ { @@ -392,7 +494,7 @@ describe("SearchClient", function (this: Suite) { assert.equal(getDocumentResult.description, "Modified Description"); }); - it("indexDocuments merge/upload documents", async function () { + it("indexDocuments merge/upload documents", async () => { const batch: IndexDocumentsBatch = new IndexDocumentsBatch(); batch.mergeOrUpload([ { @@ -414,50 +516,9 @@ describe("SearchClient", function (this: Suite) { assert.equal(documentCount, 11); }); - it("search with semantic ranking", async function () { + it("search with semantic error handling", async () => { const searchResults = await searchClient.search("luxury", { - skip: 0, - top: 5, - includeTotalCount: true, - queryType: "semantic", - semanticSearchOptions: { - configurationName: - indexDefinition.semanticSearch?.configurations?.[0].name ?? - assert.fail("No semantic configuration in index."), - }, - }); - assert.equal(searchResults.count, 1); - }); - - it("search with answers", async function () { - const searchResults = await searchClient.search("What are the most luxurious hotels?", { - queryType: "semantic", - semanticSearchOptions: { - configurationName: - indexDefinition.semanticSearch?.configurations?.[0].name ?? - assert.fail("No semantic configuration in index."), - answers: { answerType: "extractive", count: 3, threshold: 0.7 }, - }, - top: 3, - select: ["hotelId"], - }); - - const resultIds = []; - for await (const result of searchResults.results) { - resultIds.push(result.document.hotelId); - } - assert.deepEqual(["1", "9", "3"], resultIds); - }); - - it("search with semantic error handling", async function () { - const searchResults = await searchClient.search("luxury", { - queryType: "semantic", - semanticSearchOptions: { - configurationName: - indexDefinition.semanticSearch?.configurations?.[0].name ?? 
- assert.fail("No semantic configuration in index."), - errorMode: "partial", - }, + ...baseSemanticOptions(), select: ["hotelId"], }); @@ -468,10 +529,10 @@ describe("SearchClient", function (this: Suite) { assert.deepEqual(["1"], resultIds); }); - it("search with vector", async function () { + it("search with vector", async (ctx) => { // This live test is disabled due to temporary limitations with the new OpenAI service if (isLiveMode()) { - this.skip(); + ctx.skip(); } const embeddings = await openAIClient.getEmbeddings( env.AZURE_OPENAI_DEPLOYMENT_NAME ?? "deployment-name", @@ -502,10 +563,10 @@ describe("SearchClient", function (this: Suite) { assert.deepEqual(resultIds, ["1", "3", "4"]); }); - it("multi-vector search", async function () { + it("multi-vector search", async (ctx) => { // This live test is disabled due to temporary limitations with the new OpenAI service if (isLiveMode()) { - this.skip(); + ctx.skip(); } const embeddings = await openAIClient.getEmbeddings( env.AZURE_OPENAI_DEPLOYMENT_NAME ?? "deployment-name", @@ -542,10 +603,10 @@ describe("SearchClient", function (this: Suite) { assert.deepEqual(resultIds, ["1", "3", "4"]); }); - it("oversampling compressed vectors", async function () { + it("oversampling compressed vectors", async (ctx) => { // This live test is disabled due to temporary limitations with the new OpenAI service if (isLiveMode()) { - this.skip(); + ctx.skip(); } const embeddings = await openAIClient.getEmbeddings( diff --git a/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts b/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts index 0f01c72fe061..b3d34cfc1c5e 100644 --- a/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts +++ b/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts @@ -1,34 +1,30 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. 
import { env, isLiveMode, Recorder } from "@azure-tools/test-recorder"; import { delay } from "@azure/core-util"; -import { assert } from "chai"; -import { Context, Suite } from "mocha"; -import { - AzureKeyCredential, +import type { AzureOpenAIVectorizer, SearchIndex, - SearchIndexClient, SynonymMap, VectorSearchAlgorithmConfiguration, VectorSearchProfile, -} from "../../../src"; -import { defaultServiceVersion } from "../../../src/serviceUtils"; -import { Hotel } from "../utils/interfaces"; -import { createClients } from "../utils/recordedClient"; +} from "../../../src/index.js"; +import { AzureKeyCredential, SearchIndexClient } from "../../../src/index.js"; +import { defaultServiceVersion } from "../../../src/serviceUtils.js"; +import type { Hotel } from "../utils/interfaces.js"; +import { createClients } from "../utils/recordedClient.js"; import { createRandomIndexName, createSimpleIndex, createSynonymMaps, deleteSynonymMaps, WAIT_TIME, -} from "../utils/setup"; - -describe("SearchIndexClient", function (this: Suite) { - this.timeout(20_000); +} from "../utils/setup.js"; +import { describe, it, assert, beforeEach, afterEach } from "vitest"; - describe("constructor", function () { +describe("SearchIndexClient", { timeout: 20_000 }, () => { + describe("constructor", () => { const credential = new AzureKeyCredential("key"); describe("Passing serviceVersion", () => { @@ -66,13 +62,13 @@ describe("SearchIndexClient", function (this: Suite) { }); }); - describe("stable", function () { + describe("stable", { skip: true }, () => { let recorder: Recorder; let indexClient: SearchIndexClient; let TEST_INDEX_NAME: string; - beforeEach(async function (this: Context) { - recorder = new Recorder(this.currentTest); + beforeEach(async (ctx) => { + recorder = new Recorder(ctx); TEST_INDEX_NAME = createRandomIndexName(); ({ indexClient, indexName: TEST_INDEX_NAME } = await createClients( defaultServiceVersion, @@ -85,20 +81,20 @@ describe("SearchIndexClient", function (this: Suite) { await delay(WAIT_TIME); }); - afterEach(async function () { + afterEach(async () => { await indexClient.deleteIndex(TEST_INDEX_NAME); await delay(WAIT_TIME); await deleteSynonymMaps(indexClient); await recorder?.stop(); }); - describe("#synonymmaps", function () { - it("gets the list of synonymmaps", async function () { + describe("#synonymmaps", () => { + it("gets the list of synonymmaps", async () => { const synonymMaps = await indexClient.listSynonymMaps(); assert.isAtLeast(synonymMaps.length, 2); }); - it("gets the list of synonymmaps names", async function () { + it("gets the list of synonymmaps names", async () => { const synonymMapNames = await indexClient.listSynonymMapsNames(); assert.isAtLeast(synonymMapNames.length, 2); for (let i = 1; i <= 2; i++) { @@ -106,7 +102,7 @@ describe("SearchIndexClient", function (this: Suite) { } }); - it("gets the correct synonymmap object", async function () { + it("gets the correct synonymmap object", async () => { const synonymMap = await indexClient.getSynonymMap("my-azure-synonymmap-1"); assert.equal(synonymMap.name, "my-azure-synonymmap-1"); assert.equal(synonymMap.synonyms.length, 2); @@ -118,7 +114,7 @@ describe("SearchIndexClient", function (this: Suite) { assert.include(synonyms, synonymMap.synonyms[1]); }); - it("throws error for invalid synonymmap object", async function () { + it("throws error for invalid synonymmap object", async () => { let retrievalError: boolean = false; try { await indexClient.getSynonymMap("garbxyz"); @@ -128,7 +124,7 @@ 
describe("SearchIndexClient", function (this: Suite) { assert.isTrue(retrievalError); }); - it("creates the synonymmap object using createOrUpdateSynonymMap", async function () { + it("creates the synonymmap object using createOrUpdateSynonymMap", async () => { let synonymMap: SynonymMap = { name: `my-azure-synonymmap-3`, synonyms: ["United States, United States of America => USA", "Washington, Wash. => WA"], @@ -149,7 +145,7 @@ describe("SearchIndexClient", function (this: Suite) { } }); - it("modify and updates the synonymmap object", async function () { + it("modify and updates the synonymmap object", async () => { let synonymMap = await indexClient.getSynonymMap("my-azure-synonymmap-1"); synonymMap.synonyms.push("California, Clif. => CA"); await indexClient.createOrUpdateSynonymMap(synonymMap); @@ -166,8 +162,8 @@ describe("SearchIndexClient", function (this: Suite) { }); }); - describe("#indexes", function () { - it("gets the list of indexes", async function () { + describe("#indexes", () => { + it("gets the list of indexes", async () => { const result = await indexClient.listIndexes(); let listOfIndexes = await result.next(); const indexNames: string[] = []; @@ -178,7 +174,7 @@ describe("SearchIndexClient", function (this: Suite) { assert.include(indexNames, TEST_INDEX_NAME); }); - it("gets the list of indexes names", async function () { + it("gets the list of indexes names", async () => { const result = await indexClient.listIndexesNames(); let listOfIndexNames = await result.next(); const indexNames: string[] = []; @@ -189,13 +185,13 @@ describe("SearchIndexClient", function (this: Suite) { assert.include(indexNames, TEST_INDEX_NAME); }); - it("gets the correct index object", async function () { + it("gets the correct index object", async () => { const index = await indexClient.getIndex(TEST_INDEX_NAME); assert.equal(index.name, TEST_INDEX_NAME); assert.equal(index.fields.length, 5); }); - it("throws error for invalid index object", async function () { + it("throws error for invalid index object", async () => { let retrievalError: boolean = false; try { await indexClient.getIndex("garbxyz"); @@ -205,7 +201,7 @@ describe("SearchIndexClient", function (this: Suite) { assert.isTrue(retrievalError); }); - it("creates the index object using createOrUpdateIndex", async function () { + it("creates the index object using createOrUpdateIndex", async () => { const indexName: string = isLiveMode() ? createRandomIndexName() : "hotel-live-test4"; let index: SearchIndex = { name: indexName, @@ -255,7 +251,7 @@ describe("SearchIndexClient", function (this: Suite) { } }); - it("modify and updates the index object", async function () { + it("modify and updates the index object", async () => { let index = await indexClient.getIndex(TEST_INDEX_NAME); index.fields.push({ type: "Edm.DateTimeOffset", @@ -268,7 +264,7 @@ describe("SearchIndexClient", function (this: Suite) { }); }); - it("creates the index object vector fields", async function () { + it("creates the index object vector fields", async () => { const indexName: string = isLiveMode() ? createRandomIndexName() : "hotel-live-test4"; const algorithm: VectorSearchAlgorithmConfiguration = { diff --git a/sdk/search/search-documents/test/public/odata.spec.ts b/sdk/search/search-documents/test/public/odata.spec.ts index e939de63fcba..d2545cea382c 100644 --- a/sdk/search/search-documents/test/public/odata.spec.ts +++ b/sdk/search/search-documents/test/public/odata.spec.ts @@ -1,9 +1,7 @@ // Copyright (c) Microsoft Corporation. 
-// Licensed under the MIT license. - -import { assert } from "chai"; -import * as sinon from "sinon"; -import { odata } from "../../src"; +// Licensed under the MIT License. +import { odata } from "../../src/index.js"; +import { describe, it, assert } from "vitest"; describe("odata", function () { it("simple string isn't changed", function () { @@ -81,8 +79,4 @@ describe("odata", function () { assert.strictEqual(odata`Foo eq ${"bar's"}`, "Foo eq 'bar''s'"); assert.strictEqual(odata`Foo eq ${'"bar"'}`, "Foo eq '\"bar\"'"); }); - - afterEach(function () { - sinon.restore(); - }); }); diff --git a/sdk/search/search-documents/test/public/typeDefinitions.ts b/sdk/search/search-documents/test/public/typeDefinitions.ts index 3afd59df59d6..7bb7816d0255 100644 --- a/sdk/search/search-documents/test/public/typeDefinitions.ts +++ b/sdk/search/search-documents/test/public/typeDefinitions.ts @@ -1,16 +1,14 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. -/* eslint-disable @typescript-eslint/no-unused-vars */ - -import { +import type { KnownSemanticErrorMode, KnownSemanticErrorReason, KnownSemanticSearchResultsType, KnownVectorFilterMode, KnownVectorQueryKind, -} from "../../src/generated/data"; -import { +} from "../../src/generated/data/index.js"; +import type { KnownBlobIndexerDataToExtract, KnownBlobIndexerImageAction, KnownBlobIndexerParsingMode, @@ -34,7 +32,7 @@ import { KnownVectorSearchAlgorithmKind, KnownVectorSearchAlgorithmMetric, KnownVisualFeature, -} from "../../src/generated/service"; +} from "../../src/generated/service/index.js"; import type { IsEqual } from "type-plus"; @@ -81,6 +79,7 @@ type BlobIndexerParsingMode = | "json" | "jsonArray" | "jsonLines" + | "markdown" | "text"; type BlobIndexerPDFTextRotationAlgorithm = "detectAngles" | "none"; type CustomEntityLookupSkillLanguage = "da" | "de" | "en" | "es" | "fi" | "fr" | "it" | "ko" | "pt"; @@ -396,7 +395,8 @@ type SearchIndexerDataSourceType = | "azuresql" | "azuretable" | "cosmosdb" - | "mysql"; + | "mysql" + | "onelake"; type SemanticErrorMode = "fail" | "partial"; type SemanticErrorReason = "capacityOverloaded" | "maxWaitExceeded" | "transient"; type SemanticSearchResultsType = "baseResults" | "rerankedResults"; @@ -525,7 +525,7 @@ type TextTranslationSkillLanguage = | "zh-Hans" | "zh-Hant"; type VectorFilterMode = "postFilter" | "preFilter"; -type VectorQueryKind = "text" | "vector"; +type VectorQueryKind = "imageBinary" | "imageUrl" | "text" | "vector"; type VectorSearchAlgorithmKind = "exhaustiveKnn" | "hnsw"; type VectorSearchAlgorithmMetric = "cosine" | "dotProduct" | "euclidean" | "hamming"; type VisualFeature = diff --git a/sdk/search/search-documents/test/public/utils/interfaces.ts b/sdk/search/search-documents/test/public/utils/interfaces.ts index cbf59ad1d666..4b18b3bcd707 100644 --- a/sdk/search/search-documents/test/public/utils/interfaces.ts +++ b/sdk/search/search-documents/test/public/utils/interfaces.ts @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. 
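Note on the `odata` assertions above (for example, the tagged template applied to `"bar's"` yielding `"Foo eq 'bar''s'"`): OData string literals escape an embedded single quote by doubling it. A sketch of a tagged template implementing just that quoting rule; the shipped helper covers more cases, such as the numeric interpolations exercised by the README samples later in this patch. `odataSketch` is a hypothetical name.

```ts
// Minimal tagged template: wrap string interpolations in single quotes and
// double any embedded single quotes; pass other values through via String().
function odataSketch(strings: TemplateStringsArray, ...values: unknown[]): string {
  return strings.reduce((acc, part, i) => {
    if (i === 0) return part;
    const value = values[i - 1];
    const literal = typeof value === "string" ? `'${value.replace(/'/g, "''")}'` : String(value);
    return `${acc}${literal}${part}`;
  }, "");
}

// odataSketch`Foo eq ${"bar's"}` === "Foo eq 'bar''s'"
```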
-import { GeographyPoint } from "../../../src"; +import type { GeographyPoint } from "../../../src/index.js"; export interface Hotel { hotelId: string; diff --git a/sdk/search/search-documents/test/public/utils/recordedClient.ts b/sdk/search/search-documents/test/public/utils/recordedClient.ts index e843f1b005dd..0132e8edc71b 100644 --- a/sdk/search/search-documents/test/public/utils/recordedClient.ts +++ b/sdk/search/search-documents/test/public/utils/recordedClient.ts @@ -1,17 +1,12 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { createTestCredential } from "@azure-tools/test-credential"; -import { - assertEnvironmentVariable, - env, - Recorder, - RecorderStartOptions, - SanitizerOptions, -} from "@azure-tools/test-recorder"; +import type { Recorder, RecorderStartOptions, SanitizerOptions } from "@azure-tools/test-recorder"; +import { assertEnvironmentVariable, env } from "@azure-tools/test-recorder"; import { isDefined } from "@azure/core-util"; import { OpenAIClient } from "@azure/openai"; -import { SearchClient, SearchIndexClient, SearchIndexerClient } from "../../../src"; +import { SearchClient, SearchIndexClient, SearchIndexerClient } from "../../../src/index.js"; export interface Clients<TModel extends object> { searchClient: SearchClient<TModel>; @@ -54,6 +49,7 @@ function createRecorderStartOptions(): RecorderStartOptions { }; return { envSetupForPlayback, + removeCentralSanitizers: ["AZSDK2021", "AZSDK3493"], sanitizerOptions: { generalSanitizers, bodyKeySanitizers: [bodyKeySanitizer], diff --git a/sdk/search/search-documents/test/public/utils/setup.ts b/sdk/search/search-documents/test/public/utils/setup.ts index 6680ae288d51..fbe9553aa5ee 100644 --- a/sdk/search/search-documents/test/public/utils/setup.ts +++ b/sdk/search/search-documents/test/public/utils/setup.ts @@ -1,13 +1,10 @@ // Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. +// Licensed under the MIT License. import { assertEnvironmentVariable, isLiveMode, isPlaybackMode } from "@azure-tools/test-recorder"; import { computeSha256Hash, delay, isDefined } from "@azure/core-util"; -import { OpenAIClient } from "@azure/openai"; -import { assert } from "chai"; -import { - GeographyPoint, - KnownAnalyzerNames, +import type { OpenAIClient } from "@azure/openai"; +import type { SearchClient, SearchField, SearchIndex, @@ -15,13 +12,15 @@ import { SearchIndexerClient, VectorSearchAlgorithmConfiguration, VectorSearchCompression, + VectorSearchProfile, VectorSearchVectorizer, -} from "../../../src"; -import { Hotel } from "./interfaces"; +} from "../../../src/index.js"; +import { GeographyPoint, KnownAnalyzerNames } from "../../../src/index.js"; +import type { Hotel } from "./interfaces.js"; +import { assert } from "vitest"; export const WAIT_TIME = isPlaybackMode() ?
0 : 4000; -// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function createIndex( client: SearchIndexClient, name: string, @@ -40,7 +39,7 @@ export async function createIndex( }, }, ]; - await Promise.all(vectorizers.map(renameUniquelyInPlace("vectorizerName"))); + await Promise.all(vectorizers.map((v) => renameUniquelyInPlace("vectorizerName", v))); const [azureOpenAiVectorizerName] = vectorizers.map((v) => v.vectorizerName); const algorithmConfigurations: VectorSearchAlgorithmConfiguration[] = [ @@ -55,7 +54,7 @@ export async function createIndex( parameters: { metric: "euclidean" }, }, ]; - await Promise.all(algorithmConfigurations.map(renameUniquelyInPlace("name"))); + await Promise.all(algorithmConfigurations.map((c) => renameUniquelyInPlace("name", c))); const [hnswAlgorithmConfigurationName, exhaustiveKnnAlgorithmConfigurationName] = algorithmConfigurations.map((c) => c.name); @@ -67,27 +66,27 @@ export async function createIndex( rerankWithOriginalVectors: true, }, ]; - await Promise.all(compressionConfigurations.map(renameUniquelyInPlace("compressionName"))); + await Promise.all( + compressionConfigurations.map((c) => renameUniquelyInPlace("compressionName", c)), + ); const [scalarQuantizationCompressionConfigurationName] = compressionConfigurations.map( (c) => c.compressionName, ); - const vectorSearchProfiles = [ + const vectorSearchProfiles: VectorSearchProfile[] = [ { name: "vector-search-profile", - vectorizer: isPreview ? azureOpenAiVectorizerName : undefined, + vectorizerName: isPreview ? azureOpenAiVectorizerName : undefined, algorithmConfigurationName: exhaustiveKnnAlgorithmConfigurationName, }, { name: "vector-search-profile", - vectorizer: isPreview ? azureOpenAiVectorizerName : undefined, + vectorizerName: isPreview ? azureOpenAiVectorizerName : undefined, algorithmConfigurationName: hnswAlgorithmConfigurationName, - compressionConfigurationName: isPreview - ? scalarQuantizationCompressionConfigurationName - : undefined, + compressionName: isPreview ? 
scalarQuantizationCompressionConfigurationName : undefined, }, ]; - await Promise.all(vectorSearchProfiles.map(renameUniquelyInPlace("name"))); + await Promise.all(vectorSearchProfiles.map((p) => renameUniquelyInPlace("name", p))); const [azureOpenAiVectorSearchProfileName, azureOpenAiCompressedVectorSearchProfileName] = vectorSearchProfiles.map((p) => p.name); @@ -344,7 +343,6 @@ return client.createIndex(hotelIndex); } -// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function populateIndex( client: SearchClient<Hotel>, openAIClient: OpenAIClient, @@ -585,14 +583,12 @@ async function addVectorDescriptions( }); } -// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function deleteDataSourceConnections(client: SearchIndexerClient): Promise<void> { for (let i = 1; i <= 2; i++) { await client.deleteDataSourceConnection(`my-data-source-${i}`); } } -// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function createSkillsets(client: SearchIndexerClient): Promise<void> { const testCaseNames: string[] = ["my-azureblob-skillset-1", "my-azureblob-skillset-2"]; const skillSetNames: string[] = await client.listSkillsetsNames(); @@ -644,14 +640,12 @@ export async function createSkillsets(client: SearchIndexerClient): Promise<void> { for (let i = 1; i <= 2; i++) { await client.deleteSkillset(`my-azureblob-skillset-${i}`); } } -// eslint-disable-next-line @azure/azure-sdk/ts-use-interface-parameters export async function createIndexers( client: SearchIndexerClient, targetIndexName: string, @@ -768,12 +762,11 @@ return `hotel-live-test-${Math.floor(Math.random() * 100000) + 1000000}`; } -function renameUniquelyInPlace<T extends string>( +async function renameUniquelyInPlace<T extends string>( prop: T, -): (obj: Record<T, string>) => Promise<void> { - return async (obj) => { - const hash = await computeSha256Hash(JSON.stringify(obj), "hex"); - const name = [obj[prop], hash.toLowerCase()].join("-"); - obj[prop] = name; - }; + obj: Record<T, string>, +): Promise<void> { + const hash = await computeSha256Hash(JSON.stringify(obj), "hex"); + const name = [obj[prop], hash.toLowerCase()].join("-"); + obj[prop] = name; } diff --git a/sdk/search/search-documents/test/snippets.spec.ts b/sdk/search/search-documents/test/snippets.spec.ts new file mode 100644 index 000000000000..c162d0af5b6b --- /dev/null +++ b/sdk/search/search-documents/test/snippets.spec.ts @@ -0,0 +1,380 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License.
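Note on the `renameUniquelyInPlace` refactor just above: dropping the curried form in favor of a plain `(prop, obj)` signature is what enables call sites like `vectorizers.map((v) => renameUniquelyInPlace("vectorizerName", v))` earlier in this file. Below is a self-contained restatement plus an illustrative call; the names `renameUniquelyInPlaceSketch` and `example` are hypothetical.

```ts
import { computeSha256Hash } from "@azure/core-util";

// Restatement of the helper above: append a deterministic content-hash
// suffix to the named property, so identical configs rename identically.
async function renameUniquelyInPlaceSketch<T extends string>(
  prop: T,
  obj: Record<T, string>,
): Promise<void> {
  const hash = await computeSha256Hash(JSON.stringify(obj), "hex");
  obj[prop] = [obj[prop], hash.toLowerCase()].join("-");
}

async function example(): Promise<void> {
  const profile = { name: "vector-search-profile" };
  await renameUniquelyInPlaceSketch("name", profile);
  // profile.name is now "vector-search-profile-" followed by 64 hex chars.
}
```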
+ +import { describe, it } from "vitest"; +import { + AzureKeyCredential, + KnownSearchAudience, + odata, + SearchClient, + SearchFieldArray, + SearchIndexClient, + SearchIndexerClient, + SelectFields, +} from "../src/index.js"; +import { setLogLevel } from "@azure/logger"; + +describe("snippets", () => { + it("ReadmeSampleCreateClient_APIKey", async () => { + // To query and manipulate documents + const searchClient = new SearchClient( + "<endpoint>", + "<indexName>", + new AzureKeyCredential("<apiKey>"), + ); + // @ts-preserve-whitespace + // To manage indexes and synonymmaps + const indexClient = new SearchIndexClient("<endpoint>", new AzureKeyCredential("<apiKey>")); + // @ts-preserve-whitespace + // To manage indexers, datasources and skillsets + const indexerClient = new SearchIndexerClient("<endpoint>", new AzureKeyCredential("<apiKey>")); + }); + + it("ReadmeSampleCreateClient_NationalCloud", async () => { + // To query and manipulate documents + const searchClient = new SearchClient( + "<endpoint>", + "<indexName>", + new AzureKeyCredential("<apiKey>"), + { + audience: KnownSearchAudience.AzureChina, + }, + ); + // @ts-preserve-whitespace + // To manage indexes and synonymmaps + const indexClient = new SearchIndexClient("<endpoint>", new AzureKeyCredential("<apiKey>"), { + audience: KnownSearchAudience.AzureChina, + }); + // @ts-preserve-whitespace + // To manage indexers, datasources and skillsets + const indexerClient = new SearchIndexerClient( + "<endpoint>", + new AzureKeyCredential("<apiKey>"), + { + audience: KnownSearchAudience.AzureChina, + }, + ); + }); + + it("ReadmeSampleCreateIndex", async () => { + const indexClient = new SearchIndexClient("<endpoint>", new AzureKeyCredential("<apiKey>")); + // @ts-preserve-whitespace + const result = await indexClient.createIndex({ + name: "example-index", + fields: [ + { + type: "Edm.String", + name: "id", + key: true, + }, + { + type: "Edm.Double", + name: "awesomenessLevel", + sortable: true, + filterable: true, + facetable: true, + }, + { + type: "Edm.String", + name: "description", + searchable: true, + }, + { + type: "Edm.ComplexType", + name: "details", + fields: [ + { + type: "Collection(Edm.String)", + name: "tags", + searchable: true, + }, + ], + }, + { + type: "Edm.Int32", + name: "hiddenWeight", + hidden: true, + }, + ], + }); + // @ts-preserve-whitespace + console.log(`Index created with name ${result.name}`); + }); + + it("ReadmeSampleGetDocument", async () => { + const searchClient = new SearchClient( + "<endpoint>", + "<indexName>", + new AzureKeyCredential("<apiKey>"), + ); + // @ts-preserve-whitespace + const result = await searchClient.getDocument("1234"); + }); + + it("ReadmeSampleUploadDocuments", async () => { + const searchClient = new SearchClient( + "<endpoint>", + "<indexName>", + new AzureKeyCredential("<apiKey>"), + ); + // @ts-preserve-whitespace + const uploadResult = await searchClient.uploadDocuments([ + // JSON objects matching the shape of the client's index + {}, + {}, + {}, + ]); + for (const result of uploadResult.results) { + console.log(`Uploaded ${result.key}; succeeded?
${result.succeeded}`); + } + }); + + it("ReadmeSampleSearch", async () => { + const searchClient = new SearchClient( + "", + "", + new AzureKeyCredential(""), + ); + // @ts-preserve-whitespace + const searchResults = await searchClient.search("wifi -luxury"); + for await (const result of searchResults.results) { + console.log(result); + } + }); + + it("ReadmeSampleSearchLucene", async () => { + const searchClient = new SearchClient( + "", + "", + new AzureKeyCredential(""), + ); + // @ts-preserve-whitespace + const searchResults = await searchClient.search('Category:budget AND "recently renovated"^3', { + queryType: "full", + searchMode: "all", + }); + for await (const result of searchResults.results) { + console.log(result); + } + }); + + it("ReadmeSampleSearchWithTypes", async () => { + // An example schema for documents in the index + interface Hotel { + hotelId?: string; + hotelName?: string | null; + description?: string | null; + descriptionVector?: Array; + parkingIncluded?: boolean | null; + lastRenovationDate?: Date | null; + rating?: number | null; + rooms?: Array<{ + beds?: number | null; + description?: string | null; + }>; + } + // @ts-preserve-whitespace + const searchClient = new SearchClient( + "", + "", + new AzureKeyCredential(""), + ); + // @ts-preserve-whitespace + const searchResults = await searchClient.search("wifi -luxury", { + // Only fields in Hotel can be added to this array. + // TS will complain if one is misspelled. + select: ["hotelId", "hotelName", "rooms/beds"], + }); + // @ts-preserve-whitespace + // These are other ways to declare the correct type for `select`. + const select = ["hotelId", "hotelName", "rooms/beds"] as const; + // This declaration lets you opt out of narrowing the TypeScript type of your documents, + // though the AI Search service will still only return these fields. + const selectWide: SelectFields[] = ["hotelId", "hotelName", "rooms/beds"]; + // This is an invalid declaration. Passing this to `select` will result in a compiler error + // unless you opt out of including the model in the client constructor. + const selectInvalid = ["hotelId", "hotelName", "rooms/beds"]; + // @ts-preserve-whitespace + for await (const result of searchResults.results) { + // result.document has hotelId, hotelName, and rating. + // Trying to access result.document.description would emit a TS error. + console.log(result.document.hotelName); + } + }); + + it("ReadmeSampleSearchWithOData", async () => { + const searchClient = new SearchClient( + "", + "", + new AzureKeyCredential(""), + ); + // @ts-preserve-whitespace + const baseRateMax = 200; + const ratingMin = 4; + const searchResults = await searchClient.search("WiFi", { + filter: odata`Rooms/any(room: room/BaseRate lt ${baseRateMax}) and Rating ge ${ratingMin}`, + orderBy: ["Rating desc"], + select: ["hotelId", "hotelName", "Rating"], + }); + for await (const result of searchResults.results) { + // Each result will have "HotelId", "HotelName", and "Rating" + // in addition to the standard search result property "score" + console.log(result); + } + }); + + it("ReadmeSampleSearchWithVector", async () => { + const searchClient = new SearchClient( + "", + "", + new AzureKeyCredential(""), + ); + // @ts-preserve-whitespace + const queryVector: number[] = [ + // Embedding of the query "What are the most luxurious hotels?" 
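+      // (values elided here; a real query embedding must match the
+      // dimensionality of the "descriptionVector" field being searched)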
+    ];
+    const searchResults = await searchClient.search("*", {
+      vectorSearchOptions: {
+        queries: [
+          {
+            kind: "vector",
+            vector: queryVector,
+            fields: ["descriptionVector"],
+            kNearestNeighborsCount: 3,
+          },
+        ],
+      },
+    });
+    for await (const result of searchResults.results) {
+      // These results are the nearest neighbors to the query vector
+      console.log(result);
+    }
+  });
+
+  it("ReadmeSampleSearchWithFacets", async () => {
+    const searchClient = new SearchClient(
+      "<endpoint>",
+      "<indexName>",
+      new AzureKeyCredential("<apiKey>"),
+    );
+    // @ts-preserve-whitespace
+    const searchResults = await searchClient.search("WiFi", {
+      facets: ["category,count:3,sort:count", "rooms/baseRate,interval:100"],
+    });
+    console.log(searchResults.facets);
+    // Output will look like:
+    // {
+    //   'rooms/baseRate': [
+    //     { count: 16, value: 0 },
+    //     { count: 17, value: 100 },
+    //     { count: 17, value: 200 }
+    //   ],
+    //   category: [
+    //     { count: 5, value: 'Budget' },
+    //     { count: 5, value: 'Luxury' },
+    //     { count: 5, value: 'Resort and Spa' }
+    //   ]
+    // }
+  });
+
+  it("ReadmeSampleOdataUsage", async () => {
+    const baseRateMax = 200;
+    const ratingMin = 4;
+    const filter = odata`Rooms/any(room: room/BaseRate lt ${baseRateMax}) and Rating ge ${ratingMin}`;
+  });
+
+  it("ReadmeSampleSearchClient", async () => {
+    const searchClient = new SearchClient(
+      "<endpoint>",
+      "<indexName>",
+      new AzureKeyCredential("<apiKey>"),
+    );
+  });
+
+  it("ReadmeSampleSearchClientWithModel", async () => {
+    type TModel = {
+      keyName: string;
+      field1?: string | null;
+      field2?: { anotherField?: string | null } | null;
+    };
+    // @ts-preserve-whitespace
+    const searchClient = new SearchClient<TModel>(
+      "<endpoint>",
+      "<indexName>",
+      new AzureKeyCredential("<apiKey>"),
+    );
+  });
+
+  it("ReadmeSampleAutocomplete", async () => {
+    type TModel = {
+      key: string;
+      azure?: { sdk: string | null } | null;
+    };
+    // @ts-preserve-whitespace
+    const client = new SearchClient<TModel>(
+      "endpoint.azure",
+      "indexName",
+      new AzureKeyCredential("key"),
+    );
+    // @ts-preserve-whitespace
+    const searchFields: SearchFieldArray<TModel> = ["azure/sdk"];
+    // @ts-preserve-whitespace
+    const autocompleteResult = await client.autocomplete("searchText", "suggesterName", {
+      searchFields,
+    });
+  });
+
+  it("ReadmeSampleSearchTModel", async () => {
+    type TModel = {
+      key: string;
+      azure?: { sdk: string | null } | null;
+    };
+    // @ts-preserve-whitespace
+    const client = new SearchClient<TModel>(
+      "endpoint.azure",
+      "indexName",
+      new AzureKeyCredential("key"),
+    );
+    // @ts-preserve-whitespace
+    const select = ["azure/sdk"] as const;
+    const searchFields: SearchFieldArray<TModel> = ["azure/sdk"];
+    // @ts-preserve-whitespace
+    const searchResult = await client.search("searchText", {
+      select,
+      searchFields,
+    });
+  });
+
+  it("ReadmeSampleSuggest", async () => {
+    type TModel = {
+      key: string;
+      azure?: { sdk: string | null } | null;
+    };
+    // @ts-preserve-whitespace
+    const client = new SearchClient<TModel>(
+      "endpoint.azure",
+      "indexName",
+      new AzureKeyCredential("key"),
+    );
+    // @ts-preserve-whitespace
+    const select = ["azure/sdk"] as const;
+    const searchFields: SearchFieldArray<TModel> = ["azure/sdk"];
+    // @ts-preserve-whitespace
+    const suggestResult = await client.suggest("searchText", "suggesterName", {
+      select,
+      searchFields,
+    });
+  });
+
+  it("ReadmeSampleSearchIndexClient", async () => {
+    const indexClient = new SearchIndexClient("<endpoint>", new AzureKeyCredential("<apiKey>"));
+  });
+
+  it("ReadmeSampleSearchIndexerClient", async () => {
+    const indexerClient = new SearchIndexerClient("<endpoint>", new AzureKeyCredential("<apiKey>"));
+  });
+
+  it("SetLogLevel", () => {
setLogLevel("info"); + }); +}); From 6dfa9b557a0f28815808543ba53722b8697a47f3 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 16:05:38 +0000 Subject: [PATCH 05/21] get tests partially working again, at least for some validation, since they are all skipped right now --- .../test/internal/{ => node}/base64.spec.ts | 2 +- .../test/public/node/searchClient.spec.ts | 18 +++++++++--------- .../test/public/node/searchIndexClient.spec.ts | 4 ++-- .../test/public/utils/recordedClient.ts | 16 ++++++++-------- .../test/public/utils/setup.ts | 4 ++-- 5 files changed, 22 insertions(+), 22 deletions(-) rename sdk/search/search-documents/test/internal/{ => node}/base64.spec.ts (86%) diff --git a/sdk/search/search-documents/test/internal/base64.spec.ts b/sdk/search/search-documents/test/internal/node/base64.spec.ts similarity index 86% rename from sdk/search/search-documents/test/internal/base64.spec.ts rename to sdk/search/search-documents/test/internal/node/base64.spec.ts index 252a27b289c0..ffcc7c14a7b7 100644 --- a/sdk/search/search-documents/test/internal/base64.spec.ts +++ b/sdk/search/search-documents/test/internal/node/base64.spec.ts @@ -1,6 +1,6 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -import { decode, encode } from "../../src/base64.js"; +import { decode, encode } from "../../../src/base64.js"; import { describe, it, assert } from "vitest"; describe("base64", () => { diff --git a/sdk/search/search-documents/test/public/node/searchClient.spec.ts b/sdk/search/search-documents/test/public/node/searchClient.spec.ts index 718ab5cb247a..db6f1825de45 100644 --- a/sdk/search/search-documents/test/public/node/searchClient.spec.ts +++ b/sdk/search/search-documents/test/public/node/searchClient.spec.ts @@ -65,7 +65,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { }); // TODO: the preview-only tests are mixed in here when they should be in another describe (and removed in the stable release branch) - describe("stable", { skip: true }, () => { + describe("stable", () => { let recorder: Recorder; let searchClient: SearchClient; let indexClient: SearchIndexClient; @@ -95,7 +95,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { const baseSemanticOptions = () => ({ - queryLanguage: KnownQueryLanguage.EnUs, + // queryLanguage: KnownQueryLanguage.EnUs, queryType: "semantic", semanticSearchOptions: { configurationName: @@ -104,18 +104,18 @@ describe("SearchClient", { timeout: 20_000 }, () => { }, }) as const; - it("search with speller", async () => { + it.skip("search with speller", async () => { const searchResults = await searchClient.search("budjet", { skip: 0, top: 5, includeTotalCount: true, - queryLanguage: KnownQueryLanguage.EnUs, - speller: KnownQuerySpeller.Lexicon, + // queryLanguage: KnownQueryLanguage.EnUs, + // speller: KnownQuerySpeller.Lexicon, }); assert.equal(searchResults.count, 6); }); - it("search with semantic ranking", async () => { + it.skip("search with semantic ranking", async () => { const searchResults = await searchClient.search("luxury", { ...baseSemanticOptions(), skip: 0, @@ -125,7 +125,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(searchResults.count, 1); }); - it("search with document debug info", async () => { + it.skip("search with document debug info", async () => { const baseOptions = baseSemanticOptions(); const options = { ...baseOptions, @@ -164,12 +164,12 @@ describe("SearchClient", { timeout: 20_000 }, () => { }, }, }, - result.documentDebugInfo, + // result.documentDebugInfo, 
); } }); - it("search with answers", async () => { + it.skip("search with answers", async () => { const baseOptions = baseSemanticOptions(); const options = { ...baseOptions, diff --git a/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts b/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts index b3d34cfc1c5e..36dc8f33c62b 100644 --- a/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts +++ b/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts @@ -62,7 +62,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { }); }); - describe("stable", { skip: true }, () => { + describe("stable", () => { let recorder: Recorder; let indexClient: SearchIndexClient; let TEST_INDEX_NAME: string; @@ -264,7 +264,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { }); }); - it("creates the index object vector fields", async () => { + it.skip("creates the index object vector fields", async () => { const indexName: string = isLiveMode() ? createRandomIndexName() : "hotel-live-test4"; const algorithm: VectorSearchAlgorithmConfiguration = { diff --git a/sdk/search/search-documents/test/public/utils/recordedClient.ts b/sdk/search/search-documents/test/public/utils/recordedClient.ts index 0132e8edc71b..5ee282afb53b 100644 --- a/sdk/search/search-documents/test/public/utils/recordedClient.ts +++ b/sdk/search/search-documents/test/public/utils/recordedClient.ts @@ -6,7 +6,12 @@ import type { Recorder, RecorderStartOptions, SanitizerOptions } from "@azure-to import { assertEnvironmentVariable, env } from "@azure-tools/test-recorder"; import { isDefined } from "@azure/core-util"; import { OpenAIClient } from "@azure/openai"; -import { SearchClient, SearchIndexClient, SearchIndexerClient } from "../../../src/index.js"; +import { + AzureKeyCredential, + SearchClient, + SearchIndexClient, + SearchIndexerClient, +} from "../../../src/index.js"; export interface Clients { searchClient: SearchClient; @@ -92,10 +97,9 @@ export async function createClients( indexName = recorder.variable("TEST_INDEX_NAME", indexName); - const credential = createTestCredential(); + const credential = new AzureKeyCredential(assertEnvironmentVariable("API_KEY")); const endPoint: string = assertEnvironmentVariable("ENDPOINT"); - const openAIEndpoint = assertEnvironmentVariable("AZURE_OPENAI_ENDPOINT"); const searchClient = new SearchClient( endPoint, @@ -119,11 +123,7 @@ export async function createClients( serviceVersion, }), ); - const openAIClient = new OpenAIClient( - openAIEndpoint, - credential, - recorder.configureClientOptions({}), - ); + const openAIClient = new OpenAIClient("", credential, recorder.configureClientOptions({})); return { searchClient, diff --git a/sdk/search/search-documents/test/public/utils/setup.ts b/sdk/search/search-documents/test/public/utils/setup.ts index fbe9553aa5ee..f9602918a80e 100644 --- a/sdk/search/search-documents/test/public/utils/setup.ts +++ b/sdk/search/search-documents/test/public/utils/setup.ts @@ -33,8 +33,8 @@ export async function createIndex( kind: "azureOpenAI", vectorizerName: "vector-search-vectorizer", parameters: { - deploymentId: assertEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME"), - resourceUrl: assertEnvironmentVariable("AZURE_OPENAI_ENDPOINT"), + // deploymentId: assertEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME"), + // resourceUrl: assertEnvironmentVariable("AZURE_OPENAI_ENDPOINT"), modelName: "text-embedding-ada-002", }, }, From 289584cc494ac31b10659765cf7b40a95543fc70 Mon Sep 17 
00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 16:59:38 +0000 Subject: [PATCH 06/21] wip: description and compression changes --- sdk/search/search-documents/package.json | 4 +- .../search-documents-browser.api.diff.md | 2 +- .../review/search-documents-node.api.md | 13 +- .../src/generated/data/models/index.ts | 107 +++- .../src/generated/data/models/mappers.ts | 133 +++++ .../src/generated/data/models/parameters.ts | 10 + .../generated/data/operations/documents.ts | 7 + .../src/generated/data/searchClient.ts | 8 +- .../src/generated/service/models/index.ts | 558 +++++++++++++----- .../src/generated/service/models/mappers.ts | 280 ++++++++- .../generated/service/searchServiceClient.ts | 8 +- .../search-documents/src/serviceModels.ts | 4 + .../search-documents/src/serviceUtils.ts | 11 +- sdk/search/search-documents/swagger/Data.md | 9 +- .../search-documents/swagger/Service.md | 4 +- .../test/public/node/1220GA.spec.ts | 113 ++++ sdk/search/test-resources.bicep | 3 +- 17 files changed, 1081 insertions(+), 193 deletions(-) create mode 100644 sdk/search/search-documents/test/public/node/1220GA.spec.ts diff --git a/sdk/search/search-documents/package.json b/sdk/search/search-documents/package.json index d6c7a85f3794..d549c1a6a498 100644 --- a/sdk/search/search-documents/package.json +++ b/sdk/search/search-documents/package.json @@ -21,8 +21,8 @@ "pack": "pnpm pack 2>&1", "test": "npm run test:node && npm run test:browser", "test:browser": "npm run clean && dev-tool run build-package && dev-tool run build-test && dev-tool run test:vitest --browser", - "test:node": "dev-tool run test:vitest --test-proxy-debug", - "update-snippets": "dev-tool run update-snippets" + "test:node": "dev-tool run test:vitest --test-proxy-debug -- 1220GA", + "update-snippets": "echo Skipped." }, "files": [ "dist/", diff --git a/sdk/search/search-documents/review/search-documents-browser.api.diff.md b/sdk/search/search-documents/review/search-documents-browser.api.diff.md index b87770e20afc..cdfe41fb9894 100644 --- a/sdk/search/search-documents/review/search-documents-browser.api.diff.md +++ b/sdk/search/search-documents/review/search-documents-browser.api.diff.md @@ -7,7 +7,7 @@ For the complete API surface, see the corresponding -node.api.md file. 
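To make the compression change in this commit concrete: `rerankWithOriginalVectors` and the top-level `defaultOversampling` give way to a nested `rescoringOptions` object, plus a new `truncationDimension` for MRL-trained embeddings. A minimal sketch of the new shape, assuming the `ScalarQuantizationCompression` variant exported by the package (the compression name here is invented):

```ts
import type { ScalarQuantizationCompression } from "@azure/search-documents";

// Sketch only: property names follow the 2025-09-01 surface added in this patch.
const compression: ScalarQuantizationCompression = {
  kind: "scalarQuantization",
  compressionName: "my-scalar-quantization", // hypothetical name
  rescoringOptions: {
    enableRescoring: true, // replaces rerankWithOriginalVectors
    defaultOversampling: 4, // fetch 4x candidates, then rescore at full precision
    rescoreStorageMethod: "preserveOriginals", // keep originals so rescoring stays possible
  },
  // Only meaningful for MRL embeddings (e.g. OpenAI text-embedding-3-large):
  truncationDimension: 1024,
};
```

Note that `rescoreStorageMethod` is immutable once set; `discardOriginals` trades away rescoring and oversampling (and some result quality) for storage savings, per the enum docs later in this patch.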
=================================================================== --- NodeJS +++ browser -@@ -351,9 +351,9 @@ +@@ -352,9 +352,9 @@ // @public export type CreateSkillsetOptions = OperationOptions; diff --git a/sdk/search/search-documents/review/search-documents-node.api.md b/sdk/search/search-documents/review/search-documents-node.api.md index 7f17157e709c..a879dcf02584 100644 --- a/sdk/search/search-documents/review/search-documents-node.api.md +++ b/sdk/search/search-documents/review/search-documents-node.api.md @@ -158,7 +158,7 @@ export interface BaseSearchIndexerSkill { description?: string; inputs: InputFieldMappingEntry[]; name?: string; - odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"; + odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"; outputs: OutputFieldMappingEntry[]; } @@ -210,9 +210,10 @@ export interface BaseVectorSearchAlgorithmConfiguration { // @public export interface BaseVectorSearchCompression { compressionName: string; - defaultOversampling?: number; kind: "scalarQuantization" | "binaryQuantization"; - rerankWithOriginalVectors?: boolean; + // Warning: (ae-forgotten-export) The symbol "RescoringOptions" needs to be exported by the entry point index.d.ts + rescoringOptions?: RescoringOptions; + truncationDimension?: number; } // @public @@ -1355,7 +1356,8 @@ export enum KnownSearchIndexerDataSourceType { AzureSql = "azuresql", AzureTable = "azuretable", CosmosDb = "cosmosdb", - MySql = "mysql" + MySql = "mysql", + OneLake = "onelake" } // @public @@ -2245,6 +2247,7 @@ export interface SearchIndexerStatus { readonly executionHistory: IndexerExecutionResult[]; readonly lastResult?: IndexerExecutionResult; readonly limits: SearchIndexerLimits; + readonly name: string; readonly status: IndexerStatus; } @@ -2388,6 +2391,8 @@ export type SelectFields = (() => T extends TModel ? 
t export interface SemanticConfiguration { name: string; prioritizedFields: SemanticPrioritizedFields; + // Warning: (ae-forgotten-export) The symbol "RankingOrder" needs to be exported by the entry point index.d.ts + rankingOrder?: RankingOrder; } // @public (undocumented) diff --git a/sdk/search/search-documents/src/generated/data/models/index.ts b/sdk/search/search-documents/src/generated/data/models/index.ts index 37c8e7e053bb..818ff6392e98 100644 --- a/sdk/search/search-documents/src/generated/data/models/index.ts +++ b/sdk/search/search-documents/src/generated/data/models/index.ts @@ -177,6 +177,8 @@ export interface SearchRequest { scoringParameters?: string[]; /** The name of a scoring profile to evaluate match scores for matching documents in order to sort the results. */ scoringProfile?: string; + /** Enables a debugging tool that can be used to further explore your reranked results. */ + debug?: QueryDebugMode; /** A full-text search query expression; Use "*" or omit this parameter to match all documents. */ searchText?: string; /** The comma-separated list of field names to which to scope the full-text search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter. */ @@ -237,6 +239,11 @@ export interface SearchResult { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly _rerankerScore?: number; + /** + * The relevance score computed by boosting the Reranker Score. Search results are sorted by the RerankerScore/RerankerBoostedScore based on useScoringProfileBoostedRanking in the Semantic Config. RerankerBoostedScore is only returned for queries of type 'semantic' + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly rerankerBoostedScore?: number; /** * Text fragments from the document that indicate the matching search terms, organized by each applicable field; null if hit highlighting was not enabled for the query. * NOTE: This property will not be serialized. It can only be populated by the server. @@ -247,6 +254,11 @@ export interface SearchResult { * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly _captions?: QueryCaptionResult[]; + /** + * Contains debugging information that can be used to further explore your search results. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly documentDebugInfo?: DocumentDebugInfo; } /** Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type `semantic`. */ @@ -265,6 +277,65 @@ export interface QueryCaptionResult { readonly highlights?: string; } +/** Contains debugging information that can be used to further explore your search results. */ +export interface DocumentDebugInfo { + /** + * Contains debugging information specific to vector and hybrid search. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly vectors?: VectorsDebugInfo; +} + +export interface VectorsDebugInfo { + /** + * The breakdown of subscores of the document prior to the chosen result set fusion/combination method such as RRF. + * NOTE: This property will not be serialized. It can only be populated by the server. 
+ */ + readonly subscores?: QueryResultDocumentSubscores; +} + +/** The breakdown of subscores between the text and vector query components of the search query for this document. Each vector query is shown as a separate object in the same order they were received. */ +export interface QueryResultDocumentSubscores { + /** + * The BM25 or Classic score for the text portion of the query. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly text?: TextResult; + /** + * The vector similarity and @search.score values for each vector query. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly vectors?: { [propertyName: string]: SingleVectorFieldResult }[]; + /** + * The BM25 or Classic score for the text portion of the query. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly documentBoost?: number; +} + +/** The BM25 or Classic score for the text portion of the query. */ +export interface TextResult { + /** + * The BM25 or Classic score for the text portion of the query. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly searchScore?: number; +} + +/** A single vector field result. Both @search.score and vector similarity values are returned. Vector similarity is related to @search.score by an equation. */ +export interface SingleVectorFieldResult { + /** + * The @search.score value that is calculated from the vector similarity score. This is the score that's visible in a pure single-field single-vector query. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly searchScore?: number; + /** + * The vector similarity score for this document. Note this is the canonical definition of similarity metric, not the 'distance' version. For example, cosine similarity instead of cosine distance. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly vectorSimilarity?: number; +} + /** Response containing suggestion query results from an index. */ export interface SuggestDocumentsResult { /** @@ -481,6 +552,8 @@ export interface SearchOptions { captions?: QueryCaptionType; /** Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase. */ semanticQuery?: string; + /** Enables a debugging tool that can be used to further explore your search results. */ + debug?: QueryDebugMode; } /** Parameter group */ @@ -525,20 +598,20 @@ export interface AutocompleteOptions { top?: number; } -/** Known values of {@link ApiVersion20240701} that the service accepts. */ -export enum KnownApiVersion20240701 { - /** Api Version '2024-07-01' */ - TwoThousandTwentyFour0701 = "2024-07-01", +/** Known values of {@link ApiVersion20250901} that the service accepts. */ +export enum KnownApiVersion20250901 { + /** Api Version '2025-09-01' */ + TwoThousandTwentyFive0901 = "2025-09-01", } /** - * Defines values for ApiVersion20240701. \ - * {@link KnownApiVersion20240701} can be used interchangeably with ApiVersion20240701, + * Defines values for ApiVersion20250901. \ + * {@link KnownApiVersion20250901} can be used interchangeably with ApiVersion20250901, * this enum contains the known values that the service supports. 
* ### Known values supported by the service - * **2024-07-01**: Api Version '2024-07-01' + * **2025-09-01**: Api Version '2025-09-01' */ -export type ApiVersion20240701 = string; +export type ApiVersion20250901 = string; /** Known values of {@link SemanticErrorMode} that the service accepts. */ export enum KnownSemanticErrorMode { @@ -594,6 +667,24 @@ export enum KnownQueryCaptionType { */ export type QueryCaptionType = string; +/** Known values of {@link QueryDebugMode} that the service accepts. */ +export enum KnownQueryDebugMode { + /** No query debugging information will be returned. */ + Disabled = "disabled", + /** Allows the user to further explore their hybrid and vector query results. */ + Vector = "vector", +} + +/** + * Defines values for QueryDebugMode. \ + * {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **disabled**: No query debugging information will be returned. \ + * **vector**: Allows the user to further explore their hybrid and vector query results. + */ +export type QueryDebugMode = string; + /** Known values of {@link VectorQueryKind} that the service accepts. */ export enum KnownVectorQueryKind { /** Vector query where a raw vector value is provided. */ diff --git a/sdk/search/search-documents/src/generated/data/models/mappers.ts b/sdk/search/search-documents/src/generated/data/models/mappers.ts index acaddb9abfba..1499aaac8b89 100644 --- a/sdk/search/search-documents/src/generated/data/models/mappers.ts +++ b/sdk/search/search-documents/src/generated/data/models/mappers.ts @@ -352,6 +352,12 @@ export const SearchRequest: coreClient.CompositeMapper = { name: "String", }, }, + debug: { + serializedName: "debug", + type: { + name: "String", + }, + }, searchText: { serializedName: "search", type: { @@ -524,6 +530,14 @@ export const SearchResult: coreClient.CompositeMapper = { name: "Number", }, }, + rerankerBoostedScore: { + serializedName: "@search\\.rerankerBoostedScore", + readOnly: true, + nullable: true, + type: { + name: "Number", + }, + }, _highlights: { serializedName: "@search\\.highlights", readOnly: true, @@ -548,6 +562,13 @@ export const SearchResult: coreClient.CompositeMapper = { }, }, }, + documentDebugInfo: { + serializedName: "@search\\.documentDebugInfo", + type: { + name: "Composite", + className: "DocumentDebugInfo", + }, + }, }, }, }; @@ -577,6 +598,118 @@ export const QueryCaptionResult: coreClient.CompositeMapper = { }, }; +export const DocumentDebugInfo: coreClient.CompositeMapper = { + type: { + name: "Composite", + className: "DocumentDebugInfo", + modelProperties: { + vectors: { + serializedName: "vectors", + type: { + name: "Composite", + className: "VectorsDebugInfo", + }, + }, + }, + }, +}; + +export const VectorsDebugInfo: coreClient.CompositeMapper = { + type: { + name: "Composite", + className: "VectorsDebugInfo", + modelProperties: { + subscores: { + serializedName: "subscores", + type: { + name: "Composite", + className: "QueryResultDocumentSubscores", + }, + }, + }, + }, +}; + +export const QueryResultDocumentSubscores: coreClient.CompositeMapper = { + type: { + name: "Composite", + className: "QueryResultDocumentSubscores", + modelProperties: { + text: { + serializedName: "text", + type: { + name: "Composite", + className: "TextResult", + }, + }, + vectors: { + serializedName: "vectors", + readOnly: true, + type: { + name: "Sequence", + element: { + type: { + name: "Dictionary", + value: { 
+ type: { + name: "Composite", + className: "SingleVectorFieldResult", + }, + }, + }, + }, + }, + }, + documentBoost: { + serializedName: "documentBoost", + readOnly: true, + type: { + name: "Number", + }, + }, + }, + }, +}; + +export const TextResult: coreClient.CompositeMapper = { + type: { + name: "Composite", + className: "TextResult", + modelProperties: { + searchScore: { + serializedName: "searchScore", + readOnly: true, + type: { + name: "Number", + }, + }, + }, + }, +}; + +export const SingleVectorFieldResult: coreClient.CompositeMapper = { + type: { + name: "Composite", + className: "SingleVectorFieldResult", + modelProperties: { + searchScore: { + serializedName: "searchScore", + readOnly: true, + type: { + name: "Number", + }, + }, + vectorSimilarity: { + serializedName: "vectorSimilarity", + readOnly: true, + type: { + name: "Number", + }, + }, + }, + }, +}; + export const SuggestDocumentsResult: coreClient.CompositeMapper = { type: { name: "Composite", diff --git a/sdk/search/search-documents/src/generated/data/models/parameters.ts b/sdk/search/search-documents/src/generated/data/models/parameters.ts index a907865abac9..59b4a194e3e1 100644 --- a/sdk/search/search-documents/src/generated/data/models/parameters.ts +++ b/sdk/search/search-documents/src/generated/data/models/parameters.ts @@ -356,6 +356,16 @@ export const semanticQuery: OperationQueryParameter = { }, }; +export const debug: OperationQueryParameter = { + parameterPath: ["options", "searchOptions", "debug"], + mapper: { + serializedName: "debug", + type: { + name: "String", + }, + }, +}; + export const contentType: OperationParameter = { parameterPath: ["options", "contentType"], mapper: { diff --git a/sdk/search/search-documents/src/generated/data/operations/documents.ts b/sdk/search/search-documents/src/generated/data/operations/documents.ts index d79dae48a486..b10c90a18526 100644 --- a/sdk/search/search-documents/src/generated/data/operations/documents.ts +++ b/sdk/search/search-documents/src/generated/data/operations/documents.ts @@ -206,6 +206,9 @@ const searchGetOperationSpec: coreClient.OperationSpec = { 200: { bodyMapper: Mappers.SearchDocumentsResult, }, + 206: { + bodyMapper: Mappers.SearchDocumentsResult, + }, default: { bodyMapper: Mappers.ErrorResponse, }, @@ -237,6 +240,7 @@ const searchGetOperationSpec: coreClient.OperationSpec = { Parameters.answers, Parameters.captions, Parameters.semanticQuery, + Parameters.debug, ], urlParameters: [Parameters.endpoint, Parameters.indexName], headerParameters: [Parameters.accept], @@ -249,6 +253,9 @@ const searchPostOperationSpec: coreClient.OperationSpec = { 200: { bodyMapper: Mappers.SearchDocumentsResult, }, + 206: { + bodyMapper: Mappers.SearchDocumentsResult, + }, default: { bodyMapper: Mappers.ErrorResponse, }, diff --git a/sdk/search/search-documents/src/generated/data/searchClient.ts b/sdk/search/search-documents/src/generated/data/searchClient.ts index 10bfa133754c..36e9d5596706 100644 --- a/sdk/search/search-documents/src/generated/data/searchClient.ts +++ b/sdk/search/search-documents/src/generated/data/searchClient.ts @@ -15,7 +15,7 @@ import { import { DocumentsImpl } from "./operations/index.js"; import { Documents } from "./operationsInterfaces/index.js"; import { - ApiVersion20240701, + ApiVersion20250901, SearchClientOptionalParams, } from "./models/index.js"; @@ -23,7 +23,7 @@ import { export class SearchClient extends coreHttpCompat.ExtendedServiceClient { endpoint: string; indexName: string; - apiVersion: ApiVersion20240701; + apiVersion: 
ApiVersion20250901; /** * Initializes a new instance of the SearchClient class. @@ -35,7 +35,7 @@ export class SearchClient extends coreHttpCompat.ExtendedServiceClient { constructor( endpoint: string, indexName: string, - apiVersion: ApiVersion20240701, + apiVersion: ApiVersion20250901, options?: SearchClientOptionalParams, ) { if (endpoint === undefined) { @@ -56,7 +56,7 @@ export class SearchClient extends coreHttpCompat.ExtendedServiceClient { requestContentType: "application/json; charset=utf-8", }; - const packageDetails = `azsdk-js-search-documents/12.2.0-beta.2`; + const packageDetails = `azsdk-js-search-documents/12.2.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix ? `${options.userAgentOptions.userAgentPrefix} ${packageDetails}` diff --git a/sdk/search/search-documents/src/generated/service/models/index.ts b/sdk/search/search-documents/src/generated/service/models/index.ts index 936104ccf8a7..6acebc8ac184 100644 --- a/sdk/search/search-documents/src/generated/service/models/index.ts +++ b/sdk/search/search-documents/src/generated/service/models/index.ts @@ -35,6 +35,7 @@ export type SearchIndexerSkillUnion = | CustomEntityLookupSkill | TextTranslationSkill | DocumentExtractionSkill + | DocumentIntelligenceLayoutSkill | WebApiSkill | AzureOpenAIEmbeddingSkill; export type CognitiveServicesAccountUnion = @@ -98,6 +99,7 @@ export type CharFilterUnion = | CharFilter | MappingCharFilter | PatternReplaceCharFilter; +export type LexicalNormalizerUnion = LexicalNormalizer | CustomNormalizer; export type SimilarityUnion = Similarity | ClassicSimilarity | BM25Similarity; export type VectorSearchAlgorithmConfigurationUnion = | VectorSearchAlgorithmConfiguration @@ -140,15 +142,15 @@ export interface SearchIndexerDataSource { /** Represents credentials that can be used to connect to a datasource. */ export interface DataSourceCredentials { - /** The connection string for the datasource. Set to `` (with brackets) if you don't want the connection string updated. Set to `` if you want to remove the connection string value from the datasource. */ + /** The connection string for the datasource. For Azure SQL, Azure Blob, ADLS Gen 2 and Azure Table, this would be the connection string or resource ID if using managed identity. For CosmosDB this would be a formatted connection string specifying ApiKind or resource ID for managed identity. For Onelake files, connection string would be either the workspace guid or workspace FQDN; Onelake only supports managed identity connections. Set to `` (with brackets) if you don't want the connection string updated. Set to `` if you want to remove the connection string value from the datasource. */ connectionString?: string; } /** Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. */ export interface SearchIndexerDataContainer { - /** The name of the table or view (for Azure SQL data source) or collection (for CosmosDB data source) that will be indexed. */ + /** The name of the table or view (for Azure SQL datasource), collection (for CosmosDB datasource), container (for Azure Blob and ADLS Gen 2 datasources), Azure Table (for Azure Table datasource), or lakehouse (for Onelake datasource) that will be indexed. */ name: string; - /** A query that is applied to this data container. The syntax and meaning of this parameter is datasource-specific. Not supported by Azure SQL datasources. */ + /** A query that is applied to this data container. 
For CosmosDB datasource query can flatten and filter data. For Azure Blob and ADLS Gen 2 query can filter by folders. For Azure Table query can filter by row data. For Onelake query can filter by folder or shortcut. Not supported by Azure SQL datasources. */ query?: string; } @@ -359,6 +361,11 @@ export interface ListIndexersResult { /** Represents the current status and execution history of an indexer. */ export interface SearchIndexerStatus { + /** + * The name of the indexer. + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly name: string; /** * Overall indexer status. * NOTE: This property will not be serialized. It can only be populated by the server. @@ -557,6 +564,7 @@ export interface SearchIndexerSkill { | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" + | "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"; /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */ @@ -697,6 +705,8 @@ export interface ListSynonymMapsResult { export interface SearchIndex { /** The name of the index. */ name: string; + /** The description of the index. */ + description?: string; /** The fields of the index. */ fields: SearchField[]; /** The scoring profiles for the index. */ @@ -715,6 +725,8 @@ export interface SearchIndex { tokenFilters?: TokenFilterUnion[]; /** The character filters for the index. */ charFilters?: CharFilterUnion[]; + /** The normalizers for the index. */ + normalizers?: LexicalNormalizerUnion[]; /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */ encryptionKey?: SearchResourceEncryptionKey; /** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */ @@ -753,6 +765,8 @@ export interface SearchField { searchAnalyzer?: LexicalAnalyzerName; /** The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */ indexAnalyzer?: LexicalAnalyzerName; + /** The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. 
Once the normalizer is chosen, it cannot be changed for the field. Must be null for complex fields. */ + normalizer?: LexicalNormalizerName; /** The dimensionality of the vector field. */ vectorSearchDimensions?: number; /** The name of the vector search profile that specifies the algorithm and vectorizer to use when searching the vector field. */ @@ -888,6 +902,14 @@ export interface CharFilter { name: string; } +/** Base type for normalizers. */ +export interface LexicalNormalizer { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; + /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */ + name: string; +} + /** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */ export interface Similarity { /** Polymorphic discriminator, which specifies the different types this object can be */ @@ -910,6 +932,8 @@ export interface SemanticConfiguration { name: string; /** Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) need to be set. */ prioritizedFields: SemanticPrioritizedFields; + /** Specifies the score type to be used for the sort order of the search results. */ + rankingOrder?: RankingOrder; } /** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */ @@ -973,10 +997,20 @@ export interface VectorSearchCompression { kind: "scalarQuantization" | "binaryQuantization"; /** The name to associate with this particular configuration. */ compressionName: string; - /** If set to true, once the ordered set of results calculated using compressed vectors are obtained, they will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */ - rerankWithOriginalVectors?: boolean; - /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */ + /** Contains the options for rescoring. */ + rescoringOptions?: RescoringOptions; + /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should be only used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */ + truncationDimension?: number; +} + +/** Contains the options for rescoring. 
*/ +export interface RescoringOptions { + /** If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency. */ + enableRescoring?: boolean; + /** Default oversampling factor. Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values improve recall at the expense of latency. */ defaultOversampling?: number; + /** Controls the storage method for original vectors. This setting is immutable. */ + rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod; } /** Response from a List Indexes request. If successful, it includes the full definitions of all indexes. */ @@ -1015,6 +1049,8 @@ export interface AnalyzeRequest { analyzer?: string; /** The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownTokenizerNames is an enum containing known values. */ tokenizer?: string; + /** The name of the normalizer to use to normalize the given text. */ + normalizer?: LexicalNormalizerName; /** An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ tokenFilters?: string[]; /** An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */ @@ -1241,6 +1277,16 @@ export interface CustomEntityAlias { fuzzyEditDistance?: number; } +/** Controls the cardinality for chunking the content. */ +export interface DocumentIntelligenceLayoutSkillChunkingProperties { + /** The unit of the chunk. */ + unit?: DocumentIntelligenceLayoutSkillChunkingUnit; + /** The maximum chunk length in characters. Default is 500. */ + maximumLength?: number; + /** The length of overlap provided between two text chunks. Default is 0. */ + overlapLength?: number; +} + /** Defines a data change detection policy that captures changes based on the value of a high water mark column. */ export interface HighWaterMarkChangeDetectionPolicy extends DataChangeDetectionPolicy { @@ -1482,14 +1528,42 @@ export interface DocumentExtractionSkill extends SearchIndexerSkill { configuration?: { [propertyName: string]: any }; } +/** A skill that extracts content and layout information, via Azure AI Services, from files within the enrichment pipeline. */ +export interface DocumentIntelligenceLayoutSkill extends SearchIndexerSkill { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill"; + /** Controls the cardinality of the output format. Default is 'markdown'. */ + outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat; + /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */ + outputMode?: DocumentIntelligenceLayoutSkillOutputMode; + /** The depth of headers in the markdown output. Default is h6. 
*/ + markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth; + /** Controls the cardinality of the content extracted from the document by the skill */ + extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[]; + /** Controls the cardinality for chunking the content. */ + chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties; +} + /** A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. */ -export interface WebApiSkill extends SearchIndexerSkill, WebApiParameters { +export interface WebApiSkill extends SearchIndexerSkill { /** Polymorphic discriminator, which specifies the different types this object can be */ odatatype: "#Microsoft.Skills.Custom.WebApiSkill"; + /** The url for the Web API. */ + uri: string; + /** The headers required to make the http request. */ + httpHeaders?: { [propertyName: string]: string }; + /** The method for the http request. */ + httpMethod?: string; + /** The desired timeout for the request. Default is 30 seconds. */ + timeout?: string; /** The desired batch size which indicates number of documents. */ batchSize?: number; /** If set, the number of parallel calls that can be made to the Web API. */ degreeOfParallelism?: number; + /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the custom skill connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */ + authResourceId?: string; + /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */ + authIdentity?: SearchIndexerDataIdentityUnion; } /** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */ @@ -2027,6 +2101,16 @@ export interface PatternReplaceCharFilter extends CharFilter { replacement: string; } +/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored. */ +export interface CustomNormalizer extends LexicalNormalizer { + /** Polymorphic discriminator, which specifies the different types this object can be */ + odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; + /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */ + tokenFilters?: TokenFilterName[]; + /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */ + charFilters?: CharFilterName[]; +} + /** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. 
This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. */ export interface ClassicSimilarity extends Similarity { /** Polymorphic discriminator, which specifies the different types this object can be */ @@ -2115,35 +2199,37 @@ export interface SearchIndexerKnowledgeStoreObjectProjectionSelector export interface SearchIndexerKnowledgeStoreFileProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector {} -/** Known values of {@link ApiVersion20240701} that the service accepts. */ -export enum KnownApiVersion20240701 { - /** Api Version '2024-07-01' */ - TwoThousandTwentyFour0701 = "2024-07-01", +/** Known values of {@link ApiVersion20250901} that the service accepts. */ +export enum KnownApiVersion20250901 { + /** Api Version '2025-09-01' */ + TwoThousandTwentyFive0901 = "2025-09-01", } /** - * Defines values for ApiVersion20240701. \ - * {@link KnownApiVersion20240701} can be used interchangeably with ApiVersion20240701, + * Defines values for ApiVersion20250901. \ + * {@link KnownApiVersion20250901} can be used interchangeably with ApiVersion20250901, * this enum contains the known values that the service supports. * ### Known values supported by the service - * **2024-07-01**: Api Version '2024-07-01' + * **2025-09-01**: Api Version '2025-09-01' */ -export type ApiVersion20240701 = string; +export type ApiVersion20250901 = string; /** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */ export enum KnownSearchIndexerDataSourceType { - /** Indicates an Azure SQL datasource. */ + /** Definition of an Azure SQL datasource whose credentials can either be a standard SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. */ AzureSql = "azuresql", - /** Indicates a CosmosDB datasource. */ + /** Definition of an CosmosDB datasource whose credentials can either be a formatted connection string containing details for AccountEndpoint, AccountKey, and Database for a key based connection or details for ResourceID and ApiKind for keyless connection. The container property refers to cosmosdb collection to be indexed and the optional query property refers to a SQL query on the collection. */ CosmosDb = "cosmosdb", - /** Indicates an Azure Blob datasource. */ + /** Definition of an Azure Blob datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. */ AzureBlob = "azureblob", - /** Indicates an Azure Table datasource. */ + /** Definition of an Azure Table datasource whose credentials can either be a table connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property can be used to filter rows. */ AzureTable = "azuretable", - /** Indicates a MySql datasource. */ + /** Definition of an Azure SQL datasource whose credentials can either be a standard ADO.NET formatted SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. */ MySql = "mysql", - /** Indicates an ADLS Gen2 datasource. 
*/ + /** Definition of an Azure ADLS Gen 2 datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. */ AdlsGen2 = "adlsgen2", + /** Definition of an Microsoft Fabric Onelake datasource whose credentials can either be the Fabric workspace GUID or a workspace FQDN. The container property refers to the lakehouse GUID and the optional query property refers to folders or shortcuts in the lakehouse. */ + OneLake = "onelake", } /** @@ -2151,12 +2237,13 @@ export enum KnownSearchIndexerDataSourceType { * {@link KnownSearchIndexerDataSourceType} can be used interchangeably with SearchIndexerDataSourceType, * this enum contains the known values that the service supports. * ### Known values supported by the service - * **azuresql**: Indicates an Azure SQL datasource. \ - * **cosmosdb**: Indicates a CosmosDB datasource. \ - * **azureblob**: Indicates an Azure Blob datasource. \ - * **azuretable**: Indicates an Azure Table datasource. \ - * **mysql**: Indicates a MySql datasource. \ - * **adlsgen2**: Indicates an ADLS Gen2 datasource. + * **azuresql**: Definition of an Azure SQL datasource whose credentials can either be a standard SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. \ + * **cosmosdb**: Definition of an CosmosDB datasource whose credentials can either be a formatted connection string containing details for AccountEndpoint, AccountKey, and Database for a key based connection or details for ResourceID and ApiKind for keyless connection. The container property refers to cosmosdb collection to be indexed and the optional query property refers to a SQL query on the collection. \ + * **azureblob**: Definition of an Azure Blob datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. \ + * **azuretable**: Definition of an Azure Table datasource whose credentials can either be a table connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property can be used to filter rows. \ + * **mysql**: Definition of an Azure SQL datasource whose credentials can either be a standard ADO.NET formatted SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. \ + * **adlsgen2**: Definition of an Azure ADLS Gen 2 datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. \ + * **onelake**: Definition of an Microsoft Fabric Onelake datasource whose credentials can either be the Fabric workspace GUID or a workspace FQDN. The container property refers to the lakehouse GUID and the optional query property refers to folders or shortcuts in the lakehouse. 
*/ export type SearchIndexerDataSourceType = string; @@ -2628,6 +2715,33 @@ export enum KnownLexicalAnalyzerName { */ export type LexicalAnalyzerName = string; +/** Known values of {@link LexicalNormalizerName} that the service accepts. */ +export enum KnownLexicalNormalizerName { + /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */ + AsciiFolding = "asciifolding", + /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */ + Elision = "elision", + /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */ + Lowercase = "lowercase", + /** Standard normalizer, which consists of lowercase and asciifolding. */ + Standard = "standard", + /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */ + Uppercase = "uppercase", +} + +/** + * Defines values for LexicalNormalizerName. \ + * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \ + * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \ + * **lowercase**: Normalizes token text to lowercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \ + * **standard**: Standard normalizer, which consists of lowercase and asciifolding. \ + * **uppercase**: Normalizes token text to uppercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html + */ +export type LexicalNormalizerName = string; + /** Known values of {@link VectorEncodingFormat} that the service accepts. */ export enum KnownVectorEncodingFormat { /** Encoding format representing bits packed into a wider data type. */ @@ -2643,6 +2757,24 @@ export enum KnownVectorEncodingFormat { */ export type VectorEncodingFormat = string; +/** Known values of {@link RankingOrder} that the service accepts.
*/ +export enum KnownRankingOrder { + /** Sets sort order as BoostedRerankerScore */ + BoostedRerankerScore = "BoostedRerankerScore", + /** Sets sort order as RerankerScore */ + ReRankerScore = "RerankerScore", +} + +/** + * Defines values for RankingOrder. \ + * {@link KnownRankingOrder} can be used interchangeably with RankingOrder, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **BoostedRerankerScore**: Sets sort order as BoostedRerankerScore \ + * **RerankerScore**: Sets sort order as RerankerScore + */ +export type RankingOrder = string; + /** Known values of {@link VectorSearchAlgorithmKind} that the service accepts. */ export enum KnownVectorSearchAlgorithmKind { /** HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */ @@ -2697,6 +2829,153 @@ export enum KnownVectorSearchCompressionKind { */ export type VectorSearchCompressionKind = string; +/** Known values of {@link VectorSearchCompressionRescoreStorageMethod} that the service accepts. */ +export enum KnownVectorSearchCompressionRescoreStorageMethod { + /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. */ + PreserveOriginals = "preserveOriginals", + /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */ + DiscardOriginals = "discardOriginals", +} + +/** + * Defines values for VectorSearchCompressionRescoreStorageMethod. \ + * {@link KnownVectorSearchCompressionRescoreStorageMethod} can be used interchangeably with VectorSearchCompressionRescoreStorageMethod, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **preserveOriginals**: This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. \ + * **discardOriginals**: This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. + */ +export type VectorSearchCompressionRescoreStorageMethod = string; + +/** Known values of {@link TokenFilterName} that the service accepts. */ +export enum KnownTokenFilterName { + /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */ + ArabicNormalization = "arabic_normalization", + /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */ + Apostrophe = "apostrophe", + /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist.
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */ + AsciiFolding = "asciifolding", + /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */ + CjkBigram = "cjk_bigram", + /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */ + CjkWidth = "cjk_width", + /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */ + Classic = "classic", + /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */ + CommonGram = "common_grams", + /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */ + EdgeNGram = "edgeNGram_v2", + /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */ + Elision = "elision", + /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */ + GermanNormalization = "german_normalization", + /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */ + HindiNormalization = "hindi_normalization", + /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */ + IndicNormalization = "indic_normalization", + /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */ + KeywordRepeat = "keyword_repeat", + /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */ + KStem = "kstem", + /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */ + Length = "length", + /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */ + Limit = "limit", + /** Normalizes token text to lower case. 
See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */ + Lowercase = "lowercase", + /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */ + NGram = "nGram_v2", + /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */ + PersianNormalization = "persian_normalization", + /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */ + Phonetic = "phonetic", + /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */ + PorterStem = "porter_stem", + /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */ + Reverse = "reverse", + /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */ + ScandinavianNormalization = "scandinavian_normalization", + /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */ + ScandinavianFoldingNormalization = "scandinavian_folding", + /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */ + Shingle = "shingle", + /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */ + Snowball = "snowball", + /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */ + SoraniNormalization = "sorani_normalization", + /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */ + Stemmer = "stemmer", + /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */ + Stopwords = "stopwords", + /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */ + Trim = "trim", + /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */ + Truncate = "truncate", + /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */ + Unique = "unique", + /** Normalizes token text to upper case. 
See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */ + Uppercase = "uppercase", + /** Splits words into subwords and performs optional transformations on subword groups. */ + WordDelimiter = "word_delimiter", +} + +/** + * Defines values for TokenFilterName. \ + * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html \ + * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html \ + * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \ + * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html \ + * **cjk_width**: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html \ + * **classic**: Removes English possessives, and dots from acronyms. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html \ + * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html \ + * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html \ + * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \ + * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html \ + * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html \ + * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html \ + * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html \ + * **kstem**: A high-performance kstem filter for English. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html \ + * **length**: Removes words that are too long or too short. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html \ + * **limit**: Limits the number of tokens while indexing. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html \ + * **lowercase**: Normalizes token text to lower case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \ + * **nGram_v2**: Generates n-grams of the given size(s). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html \ + * **persian_normalization**: Applies normalization for Persian. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html \ + * **phonetic**: Create tokens for phonetic matches. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html \ + * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http:\/\/tartarus.org\/~martin\/PorterStemmer \ + * **reverse**: Reverses the token string. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \ + * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html \ + * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html \ + * **shingle**: Creates combinations of tokens as a single token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html \ + * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html \ + * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html \ + * **stemmer**: Language specific stemming filter. See https:\/\/learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters \ + * **stopwords**: Removes stop words from a token stream. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html \ + * **trim**: Trims leading and trailing whitespace from tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html \ + * **truncate**: Truncates the terms to a specific length. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html \ + * **unique**: Filters out tokens with same text as the previous token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html \ + * **uppercase**: Normalizes token text to upper case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html \ + * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups. + */ +export type TokenFilterName = string; + +/** Known values of {@link CharFilterName} that the service accepts. */ +export enum KnownCharFilterName { + /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */ + HtmlStrip = "html_strip", +} + +/** + * Defines values for CharFilterName. \ + * {@link KnownCharFilterName} can be used interchangeably with CharFilterName, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **html_strip**: A character filter that attempts to strip out HTML constructs. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html + */ +export type CharFilterName = string; + /** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */ export enum KnownVectorSearchAlgorithmMetric { /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */ @@ -4167,6 +4446,102 @@ export enum KnownTextTranslationSkillLanguage { */ export type TextTranslationSkillLanguage = string; +/** Known values of {@link DocumentIntelligenceLayoutSkillOutputFormat} that the service accepts. */ +export enum KnownDocumentIntelligenceLayoutSkillOutputFormat { + /** Specify the format of the output as text. */ + Text = "text", + /** Specify the format of the output as markdown. */ + Markdown = "markdown", +} + +/** + * Defines values for DocumentIntelligenceLayoutSkillOutputFormat. \ + * {@link KnownDocumentIntelligenceLayoutSkillOutputFormat} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputFormat, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **text**: Specify the format of the output as text. \ + * **markdown**: Specify the format of the output as markdown. + */ +export type DocumentIntelligenceLayoutSkillOutputFormat = string; + +/** Known values of {@link DocumentIntelligenceLayoutSkillOutputMode} that the service accepts. */ +export enum KnownDocumentIntelligenceLayoutSkillOutputMode { + /** Specify that the output should be parsed as 'oneToMany'. */ + OneToMany = "oneToMany", +} + +/** + * Defines values for DocumentIntelligenceLayoutSkillOutputMode. 
\ * {@link KnownDocumentIntelligenceLayoutSkillOutputMode} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputMode, * this enum contains the known values that the service supports. * ### Known values supported by the service * **oneToMany**: Specify that the output should be parsed as 'oneToMany'. */ +export type DocumentIntelligenceLayoutSkillOutputMode = string; + +/** Known values of {@link DocumentIntelligenceLayoutSkillMarkdownHeaderDepth} that the service accepts. */ +export enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth { + /** Header level 1. */ + H1 = "h1", + /** Header level 2. */ + H2 = "h2", + /** Header level 3. */ + H3 = "h3", + /** Header level 4. */ + H4 = "h4", + /** Header level 5. */ + H5 = "h5", + /** Header level 6. */ + H6 = "h6", +} + +/** + * Defines values for DocumentIntelligenceLayoutSkillMarkdownHeaderDepth. \ + * {@link KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth} can be used interchangeably with DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **h1**: Header level 1. \ + * **h2**: Header level 2. \ + * **h3**: Header level 3. \ + * **h4**: Header level 4. \ + * **h5**: Header level 5. \ + * **h6**: Header level 6. + */ +export type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string; + +/** Known values of {@link DocumentIntelligenceLayoutSkillExtractionOptions} that the service accepts. */ +export enum KnownDocumentIntelligenceLayoutSkillExtractionOptions { + /** Specify that image content should be extracted from the document. */ + Images = "images", + /** Specify that location metadata should be extracted from the document. */ + LocationMetadata = "locationMetadata", +} + +/** + * Defines values for DocumentIntelligenceLayoutSkillExtractionOptions. \ + * {@link KnownDocumentIntelligenceLayoutSkillExtractionOptions} can be used interchangeably with DocumentIntelligenceLayoutSkillExtractionOptions, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **images**: Specify that image content should be extracted from the document. \ + * **locationMetadata**: Specify that location metadata should be extracted from the document. + */ +export type DocumentIntelligenceLayoutSkillExtractionOptions = string; + +/** Known values of {@link DocumentIntelligenceLayoutSkillChunkingUnit} that the service accepts. */ +export enum KnownDocumentIntelligenceLayoutSkillChunkingUnit { + /** Specifies chunking by characters. */ + Characters = "characters", +} + +/** + * Defines values for DocumentIntelligenceLayoutSkillChunkingUnit. \ + * {@link KnownDocumentIntelligenceLayoutSkillChunkingUnit} can be used interchangeably with DocumentIntelligenceLayoutSkillChunkingUnit, + * this enum contains the known values that the service supports. + * ### Known values supported by the service + * **characters**: Specifies chunking by characters. + */ +export type DocumentIntelligenceLayoutSkillChunkingUnit = string; + /** Known values of {@link LexicalTokenizerName} that the service accepts. */ export enum KnownLexicalTokenizerName { /** Grammar-based tokenizer that is suitable for processing most European-language documents.
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */ @@ -4218,135 +4593,6 @@ export enum KnownLexicalTokenizerName { */ export type LexicalTokenizerName = string; -/** Known values of {@link TokenFilterName} that the service accepts. */ -export enum KnownTokenFilterName { - /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */ - ArabicNormalization = "arabic_normalization", - /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */ - Apostrophe = "apostrophe", - /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */ - AsciiFolding = "asciifolding", - /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */ - CjkBigram = "cjk_bigram", - /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */ - CjkWidth = "cjk_width", - /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */ - Classic = "classic", - /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */ - CommonGram = "common_grams", - /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */ - EdgeNGram = "edgeNGram_v2", - /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */ - Elision = "elision", - /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */ - GermanNormalization = "german_normalization", - /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */ - HindiNormalization = "hindi_normalization", - /** Normalizes the Unicode representation of text in Indian languages. 
See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */ - IndicNormalization = "indic_normalization", - /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */ - KeywordRepeat = "keyword_repeat", - /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */ - KStem = "kstem", - /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */ - Length = "length", - /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */ - Limit = "limit", - /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */ - Lowercase = "lowercase", - /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */ - NGram = "nGram_v2", - /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */ - PersianNormalization = "persian_normalization", - /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */ - Phonetic = "phonetic", - /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */ - PorterStem = "porter_stem", - /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */ - Reverse = "reverse", - /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */ - ScandinavianNormalization = "scandinavian_normalization", - /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */ - ScandinavianFoldingNormalization = "scandinavian_folding", - /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */ - Shingle = "shingle", - /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */ - Snowball = "snowball", - /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */ - SoraniNormalization = "sorani_normalization", - /** Language specific stemming filter. 
See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */ - Stemmer = "stemmer", - /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */ - Stopwords = "stopwords", - /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */ - Trim = "trim", - /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */ - Truncate = "truncate", - /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */ - Unique = "unique", - /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */ - Uppercase = "uppercase", - /** Splits words into subwords and performs optional transformations on subword groups. */ - WordDelimiter = "word_delimiter", -} - -/** - * Defines values for TokenFilterName. \ - * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html \ - * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html \ - * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \ - * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html \ - * **cjk_width**: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html \ - * **classic**: Removes English possessives, and dots from acronyms. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html \ - * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html \ - * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html \ - * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \ - * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html \ - * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html \ - * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html \ - * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html \ - * **kstem**: A high-performance kstem filter for English. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html \ - * **length**: Removes words that are too long or too short. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html \ - * **limit**: Limits the number of tokens while indexing. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html \ - * **lowercase**: Normalizes token text to lower case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \ - * **nGram_v2**: Generates n-grams of the given size(s). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html \ - * **persian_normalization**: Applies normalization for Persian. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html \ - * **phonetic**: Create tokens for phonetic matches. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html \ - * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http:\/\/tartarus.org\/~martin\/PorterStemmer \ - * **reverse**: Reverses the token string. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \ - * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html \ - * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. 
See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html \ - * **shingle**: Creates combinations of tokens as a single token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html \ - * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html \ - * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html \ - * **stemmer**: Language specific stemming filter. See https:\/\/learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters \ - * **stopwords**: Removes stop words from a token stream. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html \ - * **trim**: Trims leading and trailing whitespace from tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html \ - * **truncate**: Truncates the terms to a specific length. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html \ - * **unique**: Filters out tokens with same text as the previous token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html \ - * **uppercase**: Normalizes token text to upper case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html \ - * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups. - */ -export type TokenFilterName = string; - -/** Known values of {@link CharFilterName} that the service accepts. */ -export enum KnownCharFilterName { - /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */ - HtmlStrip = "html_strip", -} - -/** - * Defines values for CharFilterName. \ - * {@link KnownCharFilterName} can be used interchangeably with CharFilterName, - * this enum contains the known values that the service supports. - * ### Known values supported by the service - * **html_strip**: A character filter that attempts to strip out HTML constructs. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html - */ -export type CharFilterName = string; - /** Known values of {@link RegexFlags} that the service accepts. */ export enum KnownRegexFlags { /** Enables canonical equivalence. 
*/ diff --git a/sdk/search/search-documents/src/generated/service/models/mappers.ts b/sdk/search/search-documents/src/generated/service/models/mappers.ts index fc8bfb778c7b..ad03ad73887f 100644 --- a/sdk/search/search-documents/src/generated/service/models/mappers.ts +++ b/sdk/search/search-documents/src/generated/service/models/mappers.ts @@ -698,6 +698,14 @@ export const SearchIndexerStatus: coreClient.CompositeMapper = { name: "Composite", className: "SearchIndexerStatus", modelProperties: { + name: { + serializedName: "name", + required: true, + readOnly: true, + type: { + name: "String", + }, + }, status: { serializedName: "status", required: true, @@ -1502,6 +1510,12 @@ export const SearchIndex: coreClient.CompositeMapper = { name: "String", }, }, + description: { + serializedName: "description", + type: { + name: "String", + }, + }, fields: { serializedName: "fields", required: true, @@ -1600,6 +1614,18 @@ export const SearchIndex: coreClient.CompositeMapper = { }, }, }, + normalizers: { + serializedName: "normalizers", + type: { + name: "Sequence", + element: { + type: { + name: "Composite", + className: "LexicalNormalizer", + }, + }, + }, + }, encryptionKey: { serializedName: "encryptionKey", type: { @@ -1720,9 +1746,16 @@ export const SearchField: coreClient.CompositeMapper = { name: "String", }, }, + normalizer: { + serializedName: "normalizer", + nullable: true, + type: { + name: "String", + }, + }, vectorSearchDimensions: { constraints: { - InclusiveMaximum: 2048, + InclusiveMaximum: 4096, InclusiveMinimum: 2, }, serializedName: "dimensions", @@ -2055,6 +2088,34 @@ export const CharFilter: coreClient.CompositeMapper = { }, }; +export const LexicalNormalizer: coreClient.CompositeMapper = { + type: { + name: "Composite", + className: "LexicalNormalizer", + uberParent: "LexicalNormalizer", + polymorphicDiscriminator: { + serializedName: "@odata\\.type", + clientName: "odatatype", + }, + modelProperties: { + odatatype: { + serializedName: "@odata\\.type", + required: true, + type: { + name: "String", + }, + }, + name: { + serializedName: "name", + required: true, + type: { + name: "String", + }, + }, + }, + }, +}; + export const Similarity: coreClient.CompositeMapper = { type: { name: "Composite", @@ -2122,6 +2183,13 @@ export const SemanticConfiguration: coreClient.CompositeMapper = { className: "SemanticPrioritizedFields", }, }, + rankingOrder: { + serializedName: "rankingOrder", + nullable: true, + type: { + name: "String", + }, + }, }, }, }; @@ -2354,9 +2422,33 @@ export const VectorSearchCompression: coreClient.CompositeMapper = { name: "String", }, }, - rerankWithOriginalVectors: { + rescoringOptions: { + serializedName: "rescoringOptions", + type: { + name: "Composite", + className: "RescoringOptions", + }, + }, + truncationDimension: { + serializedName: "truncationDimension", + nullable: true, + type: { + name: "Number", + }, + }, + }, + }, +}; + +export const RescoringOptions: coreClient.CompositeMapper = { + type: { + name: "Composite", + className: "RescoringOptions", + modelProperties: { + enableRescoring: { defaultValue: true, - serializedName: "rerankWithOriginalVectors", + serializedName: "enableRescoring", + nullable: true, type: { name: "Boolean", }, @@ -2368,6 +2460,13 @@ export const VectorSearchCompression: coreClient.CompositeMapper = { name: "Number", }, }, + rescoreStorageMethod: { + serializedName: "rescoreStorageMethod", + nullable: true, + type: { + name: "String", + }, + }, }, }, }; @@ -2452,6 +2551,12 @@ export const AnalyzeRequest: 
coreClient.CompositeMapper = { name: "String", }, }, + normalizer: { + serializedName: "normalizer", + type: { + name: "String", + }, + }, tokenFilters: { serializedName: "tokenFilters", type: { @@ -3127,6 +3232,38 @@ export const CustomEntityAlias: coreClient.CompositeMapper = { }, }; +export const DocumentIntelligenceLayoutSkillChunkingProperties: coreClient.CompositeMapper = + { + type: { + name: "Composite", + className: "DocumentIntelligenceLayoutSkillChunkingProperties", + modelProperties: { + unit: { + defaultValue: "characters", + serializedName: "unit", + nullable: true, + type: { + name: "String", + }, + }, + maximumLength: { + serializedName: "maximumLength", + nullable: true, + type: { + name: "Number", + }, + }, + overlapLength: { + serializedName: "overlapLength", + nullable: true, + type: { + name: "Number", + }, + }, + }, + }, + }; + export const HighWaterMarkChangeDetectionPolicy: coreClient.CompositeMapper = { serializedName: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy", type: { @@ -3807,6 +3944,62 @@ export const DocumentExtractionSkill: coreClient.CompositeMapper = { }, }; +export const DocumentIntelligenceLayoutSkill: coreClient.CompositeMapper = { + serializedName: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill", + type: { + name: "Composite", + className: "DocumentIntelligenceLayoutSkill", + uberParent: "SearchIndexerSkill", + polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, + modelProperties: { + ...SearchIndexerSkill.type.modelProperties, + outputFormat: { + defaultValue: "markdown", + serializedName: "outputFormat", + nullable: true, + type: { + name: "String", + }, + }, + outputMode: { + defaultValue: "oneToMany", + serializedName: "outputMode", + nullable: true, + type: { + name: "String", + }, + }, + markdownHeaderDepth: { + defaultValue: "h6", + serializedName: "markdownHeaderDepth", + nullable: true, + type: { + name: "String", + }, + }, + extractionOptions: { + serializedName: "extractionOptions", + nullable: true, + type: { + name: "Sequence", + element: { + type: { + name: "String", + }, + }, + }, + }, + chunkingProperties: { + serializedName: "chunkingProperties", + type: { + name: "Composite", + className: "DocumentIntelligenceLayoutSkillChunkingProperties", + }, + }, + }, + }, +}; + export const WebApiSkill: coreClient.CompositeMapper = { serializedName: "#Microsoft.Skills.Custom.WebApiSkill", type: { @@ -3816,7 +4009,32 @@ export const WebApiSkill: coreClient.CompositeMapper = { polymorphicDiscriminator: SearchIndexerSkill.type.polymorphicDiscriminator, modelProperties: { ...SearchIndexerSkill.type.modelProperties, - ...WebApiParameters.type.modelProperties, + uri: { + serializedName: "uri", + required: true, + type: { + name: "String", + }, + }, + httpHeaders: { + serializedName: "httpHeaders", + type: { + name: "Dictionary", + value: { type: { name: "String" } }, + }, + }, + httpMethod: { + serializedName: "httpMethod", + type: { + name: "String", + }, + }, + timeout: { + serializedName: "timeout", + type: { + name: "TimeSpan", + }, + }, batchSize: { serializedName: "batchSize", nullable: true, @@ -3831,6 +4049,20 @@ export const WebApiSkill: coreClient.CompositeMapper = { name: "Number", }, }, + authResourceId: { + serializedName: "authResourceId", + nullable: true, + type: { + name: "String", + }, + }, + authIdentity: { + serializedName: "authIdentity", + type: { + name: "Composite", + className: "SearchIndexerDataIdentity", + }, + }, }, }, }; @@ -5669,6 +5901,41 @@ export const 
PatternReplaceCharFilter: coreClient.CompositeMapper = { }, }; +export const CustomNormalizer: coreClient.CompositeMapper = { + serializedName: "#Microsoft.Azure.Search.CustomNormalizer", + type: { + name: "Composite", + className: "CustomNormalizer", + uberParent: "LexicalNormalizer", + polymorphicDiscriminator: LexicalNormalizer.type.polymorphicDiscriminator, + modelProperties: { + ...LexicalNormalizer.type.modelProperties, + tokenFilters: { + serializedName: "tokenFilters", + type: { + name: "Sequence", + element: { + type: { + name: "String", + }, + }, + }, + }, + charFilters: { + serializedName: "charFilters", + type: { + name: "Sequence", + element: { + type: { + name: "String", + }, + }, + }, + }, + }, + }, +}; + export const ClassicSimilarity: coreClient.CompositeMapper = { serializedName: "#Microsoft.Azure.Search.ClassicSimilarity", type: { @@ -5898,6 +6165,7 @@ export let discriminators = { LexicalTokenizer: LexicalTokenizer, TokenFilter: TokenFilter, CharFilter: CharFilter, + LexicalNormalizer: LexicalNormalizer, Similarity: Similarity, VectorSearchAlgorithmConfiguration: VectorSearchAlgorithmConfiguration, VectorSearchVectorizer: VectorSearchVectorizer, @@ -5938,6 +6206,8 @@ export let discriminators = { TextTranslationSkill, "SearchIndexerSkill.#Microsoft.Skills.Util.DocumentExtractionSkill": DocumentExtractionSkill, + "SearchIndexerSkill.#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill": + DocumentIntelligenceLayoutSkill, "SearchIndexerSkill.#Microsoft.Skills.Custom.WebApiSkill": WebApiSkill, "SearchIndexerSkill.#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill": AzureOpenAIEmbeddingSkill, @@ -6017,6 +6287,8 @@ export let discriminators = { "CharFilter.#Microsoft.Azure.Search.MappingCharFilter": MappingCharFilter, "CharFilter.#Microsoft.Azure.Search.PatternReplaceCharFilter": PatternReplaceCharFilter, + "LexicalNormalizer.#Microsoft.Azure.Search.CustomNormalizer": + CustomNormalizer, "Similarity.#Microsoft.Azure.Search.ClassicSimilarity": ClassicSimilarity, "Similarity.#Microsoft.Azure.Search.BM25Similarity": BM25Similarity, "VectorSearchAlgorithmConfiguration.hnsw": HnswAlgorithmConfiguration, diff --git a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts b/sdk/search/search-documents/src/generated/service/searchServiceClient.ts index 2c5f749bb603..968f25948081 100644 --- a/sdk/search/search-documents/src/generated/service/searchServiceClient.ts +++ b/sdk/search/search-documents/src/generated/service/searchServiceClient.ts @@ -30,7 +30,7 @@ import { import * as Parameters from "./models/parameters.js"; import * as Mappers from "./models/mappers.js"; import { - ApiVersion20240701, + ApiVersion20250901, SearchServiceClientOptionalParams, GetServiceStatisticsOptionalParams, GetServiceStatisticsResponse, @@ -39,7 +39,7 @@ import { /** @internal */ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { endpoint: string; - apiVersion: ApiVersion20240701; + apiVersion: ApiVersion20250901; /** * Initializes a new instance of the SearchServiceClient class. 
@@ -49,7 +49,7 @@ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { */ constructor( endpoint: string, - apiVersion: ApiVersion20240701, + apiVersion: ApiVersion20250901, options?: SearchServiceClientOptionalParams, ) { if (endpoint === undefined) { @@ -67,7 +67,7 @@ export class SearchServiceClient extends coreHttpCompat.ExtendedServiceClient { requestContentType: "application/json; charset=utf-8", }; - const packageDetails = `azsdk-js-search-documents/12.2.0-beta.2`; + const packageDetails = `azsdk-js-search-documents/12.2.0`; const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix ? `${options.userAgentOptions.userAgentPrefix} ${packageDetails}` diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index 2a8f2e41a5f0..684f415a510e 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -1054,6 +1054,10 @@ export interface SearchIndex { * The name of the index. */ name: string; + /** + * The description of the index. + */ + description?: string; /** * The fields of the index. */ diff --git a/sdk/search/search-documents/src/serviceUtils.ts b/sdk/search/search-documents/src/serviceUtils.ts index afc5a0aa475c..042ac23b2b86 100644 --- a/sdk/search/search-documents/src/serviceUtils.ts +++ b/sdk/search/search-documents/src/serviceUtils.ts @@ -44,7 +44,12 @@ import { VectorSearchVectorizerUnion as GeneratedVectorSearchVectorizer, WebApiVectorizer as GeneratedWebAPIVectorizer, } from "./generated/service/models/index.js"; -import { SearchResult, SelectFields, SuggestDocumentsResult, SuggestResult } from "./indexModels.js"; +import { + SearchResult, + SelectFields, + SuggestDocumentsResult, + SuggestResult, +} from "./indexModels.js"; import { logger } from "./logger.js"; import { AzureOpenAIVectorizer, @@ -88,7 +93,7 @@ import { WebApiVectorizer, } from "./serviceModels.js"; -export const defaultServiceVersion = "2024-07-01"; +export const defaultServiceVersion = "2025-09-01"; const knownSkills: Record<`${SearchIndexerSkillUnion["odatatype"]}`, true> = { "#Microsoft.Skills.Custom.WebApiSkill": true, @@ -110,6 +115,7 @@ const knownSkills: Record<`${SearchIndexerSkillUnion["odatatype"]}`, true> = { "#Microsoft.Skills.Util.ShaperSkill": true, "#Microsoft.Skills.Vision.ImageAnalysisSkill": true, "#Microsoft.Skills.Vision.OcrSkill": true, + "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill": true, }; export function convertSkillsToPublic(skills: SearchIndexerSkillUnion[]): SearchIndexerSkill[] { @@ -400,6 +406,7 @@ function convertEncryptionKeyToGenerated( export function generatedIndexToPublicIndex(generatedIndex: GeneratedSearchIndex): SearchIndex { return { name: generatedIndex.name, + description: generatedIndex.description, defaultScoringProfile: generatedIndex.defaultScoringProfile, corsOptions: generatedIndex.corsOptions, suggesters: generatedIndex.suggesters, diff --git a/sdk/search/search-documents/swagger/Data.md b/sdk/search/search-documents/swagger/Data.md index ef404c2ed595..73ef30e2f4c6 100644 --- a/sdk/search/search-documents/swagger/Data.md +++ b/sdk/search/search-documents/swagger/Data.md @@ -10,13 +10,13 @@ generate-metadata: false license-header: MICROSOFT_MIT_NO_VERSION output-folder: ../ source-code-folder-path: ./src/generated/data -input-file: 
https://raw.githubusercontent.com/Azure/azure-rest-api-specs/dc27f9b32787533cd4d07fe0de5245f2f8354dbe/specification/search/data-plane/Azure.Search/stable/2024-07-01/searchindex.json +input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/613315ec5a543cdc0f0b259b23a5ae02f46663dc/specification/search/data-plane/Azure.Search/stable/2025-09-01/searchindex.json add-credentials: false title: SearchClient use-extension: "@autorest/typescript": "6.0.34" core-http-compat-mode: true -package-version: 12.2.0-beta.2 +package-version: 12.2.0 disable-async-iterators: true api-version-parameter: choice v3: true @@ -167,7 +167,8 @@ directive: $["$ref"] = "#/definitions/HybridSearch"; ``` - +``` diff --git a/sdk/search/search-documents/swagger/Service.md b/sdk/search/search-documents/swagger/Service.md index 7268baf7432e..bf0efe8bd51a 100644 --- a/sdk/search/search-documents/swagger/Service.md +++ b/sdk/search/search-documents/swagger/Service.md @@ -10,12 +10,12 @@ generate-metadata: false license-header: MICROSOFT_MIT_NO_VERSION output-folder: ../ source-code-folder-path: ./src/generated/service -input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/dc27f9b32787533cd4d07fe0de5245f2f8354dbe/specification/search/data-plane/Azure.Search/stable/2024-07-01/searchservice.json +input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/613315ec5a543cdc0f0b259b23a5ae02f46663dc/specification/search/data-plane/Azure.Search/stable/2025-09-01/searchservice.json add-credentials: false use-extension: "@autorest/typescript": "6.0.34" core-http-compat-mode: true -package-version: 12.2.0-beta.2 +package-version: 12.2.0 disable-async-iterators: true api-version-parameter: choice v3: true diff --git a/sdk/search/search-documents/test/public/node/1220GA.spec.ts b/sdk/search/search-documents/test/public/node/1220GA.spec.ts new file mode 100644 index 000000000000..22b2daf2a533 --- /dev/null +++ b/sdk/search/search-documents/test/public/node/1220GA.spec.ts @@ -0,0 +1,113 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +import { + VectorSearch, + SearchField, + SearchIndex, + IndexDocumentsClient, + SearchClient, + SearchIndexClient, + AzureKeyCredential, + SearchIndexerClient, +} from "@azure/search-documents"; +import { describe, beforeEach, it, expect } from "vitest"; +import config from "dotenv/config"; +import { afterEach } from "node:test"; + +// Test index names (prefixed to avoid conflicts) + +const credential = new AzureKeyCredential(process.env.API_KEY!); +const indexClient = new SearchIndexClient(process.env.ENDPOINT!, credential); +const indexerClient = new SearchIndexerClient(process.env.ENDPOINT!, credential); + +describe("1220GA", () => { + it("Vector Search HNSW storage optimization", async () => { + const vectorSearch: VectorSearch = { + algorithms: [ + { + name: "hnsw-config", + kind: "hnsw", + parameters: { + metric: "cosine", + m: 4, + efConstruction: 400, + efSearch: 500, + }, + }, + ], + profiles: [ + { + name: "vector-profile", + algorithmConfigurationName: "hnsw-config", + compressionName: "compression-config", + }, + ], + compressions: [ + { + compressionName: "compression-config", + kind: "scalarQuantization", + parameters: { + quantizedDataType: "int8", + }, + rescoringOptions: { + enableRescoring: true, + defaultOversampling: 10.0, + }, + truncationDimension: 512, // New feature: dimension truncation + }, + ], + }; + + const fields: SearchField[] = [ + { name: "id", type: "Edm.String", key: true, searchable: false }, + { name: "content", type: "Edm.String", searchable: true }, + { + name: "embedding", + type: "Collection(Edm.Single)", + searchable: true, + vectorSearchDimensions: 1024, + vectorSearchProfileName: "vector-profile", + }, + ]; + + const index: SearchIndex = { + name: `${Date.now()}-hnsw-compression`, + fields, + vectorSearch, + }; + + console.log("Creating index with vector compression..."); + await indexClient.createIndex(index); + + console.log("Retrieving index to verify compression settings..."); + const retrievedIndex = await indexClient.getIndex(index.name); + + // Verify compression settings + const compression = retrievedIndex.vectorSearch?.compressions?.[0]; + expect(compression?.truncationDimension).toBe(512); + expect(compression?.rescoringOptions?.enableRescoring).toBe(true); + expect(compression?.rescoringOptions?.defaultOversampling).toBe(10.0); + + await indexClient.deleteIndex(index.name); + }); + + it("Search Index description", async () => { + const indexName = `${Date.now()}-description`; + await indexClient.createIndex({ + name: indexName, + fields: [ + { + name: "id", + type: "Edm.String", + key: true, + searchable: false, + }, + ], + description: "Test index with description", + }); + const retrievedIndex = await indexClient.getIndex(indexName); + expect(retrievedIndex.description).toBe("Test index with description"); + await indexClient.deleteIndex(indexName); + }); +}); diff --git a/sdk/search/test-resources.bicep b/sdk/search/test-resources.bicep index 3b0ef12623d1..142411304ef2 100644 --- a/sdk/search/test-resources.bicep +++ b/sdk/search/test-resources.bicep @@ -7,7 +7,7 @@ resource searchService 'Microsoft.Search/searchServices@2024-06-01-preview' = { name: baseName location: resourceGroup().location sku: { - name: 'basic' + name: 'standard' } properties: { disableLocalAuth: !supportsSafeSecretStandard @@ -22,7 +22,6 @@ resource adminRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01 properties: { roleDefinitionId: subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'b24988ac-6180-42a0-ab88-20f7382dd24c') 
principalId: testApplicationOid - principalType: 'ServicePrincipal' } } From 1da946755210c50b7d3b21277797fd733ae6460e Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 20:47:10 +0000 Subject: [PATCH 07/21] implement GA features --- .../src/generated/data/models/index.ts | 2 +- sdk/search/search-documents/src/index.ts | 6 ++++++ sdk/search/search-documents/src/indexModels.ts | 5 +++++ sdk/search/search-documents/src/serviceModels.ts | 15 +++++++++++++++ sdk/search/search-documents/src/serviceUtils.ts | 8 +++++++- 5 files changed, 34 insertions(+), 2 deletions(-) diff --git a/sdk/search/search-documents/src/generated/data/models/index.ts b/sdk/search/search-documents/src/generated/data/models/index.ts index 818ff6392e98..71f741cac84d 100644 --- a/sdk/search/search-documents/src/generated/data/models/index.ts +++ b/sdk/search/search-documents/src/generated/data/models/index.ts @@ -690,7 +690,7 @@ export enum KnownVectorQueryKind { /** Vector query where a raw vector value is provided. */ Vector = "vector", /** Vector query where a text value that needs to be vectorized is provided. */ - $DO_NOT_NORMALIZE$_text = "text", + Text = "text", } /** diff --git a/sdk/search/search-documents/src/index.ts b/sdk/search/search-documents/src/index.ts index 70f856fa2d41..4d9110f2027c 100644 --- a/sdk/search/search-documents/src/index.ts +++ b/sdk/search/search-documents/src/index.ts @@ -42,6 +42,7 @@ export { CorsOptions, CustomEntity, CustomEntityAlias, + CustomNormalizer, DataChangeDetectionPolicy as BaseDataChangeDetectionPolicy, DataDeletionDetectionPolicy as BaseDataDeletionDetectionPolicy, DefaultCognitiveServicesAccount, @@ -81,10 +82,12 @@ export { KnownIndexerExecutionEnvironment, KnownIndexProjectionMode, KnownKeyPhraseExtractionSkillLanguage, + KnownLexicalNormalizerName as KnownLexicalNormalizerNames, KnownLexicalTokenizerName as KnownTokenizerNames, KnownOcrLineEnding, KnownOcrSkillLanguage, KnownPIIDetectionSkillMaskingMode, + KnownRankingOrder, KnownRegexFlags, KnownSearchFieldDataType, KnownSearchIndexerDataSourceType, @@ -106,6 +109,8 @@ export { LexicalAnalyzerName, LexicalTokenizer as BaseLexicalTokenizer, LexicalTokenizerName, + LexicalNormalizerName, + LexicalNormalizer as BaseLexicalNormalizer, LimitTokenFilter, LuceneStandardAnalyzer, MagnitudeScoringFunction, @@ -321,6 +326,7 @@ export { KeywordTokenizer, KnownAnalyzerNames, LexicalAnalyzer, + LexicalNormalizer, LexicalTokenizer, ListDataSourceConnectionsOptions, ListIndexersOptions, diff --git a/sdk/search/search-documents/src/indexModels.ts b/sdk/search/search-documents/src/indexModels.ts index 025271bb80c4..3743129ce50e 100644 --- a/sdk/search/search-documents/src/indexModels.ts +++ b/sdk/search/search-documents/src/indexModels.ts @@ -381,6 +381,11 @@ export type SearchResult< * NOTE: This property will not be serialized. It can only be populated by the server. */ readonly rerankerScore?: number; + /** + * The relevance score computed by boosting the Reranker Score. Search results are sorted by the RerankerScore/RerankerBoostedScore based on useScoringProfileBoostedRanking in the Semantic Config. RerankerBoostedScore is only returned for queries of type 'semantic' + * NOTE: This property will not be serialized. It can only be populated by the server. + */ + readonly rerankerBoostedScore?: number; /** * Text fragments from the document that indicate the matching search terms, organized by each * applicable field; null if hit highlighting was not enabled for the query. 
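For context, a minimal sketch of how the new `rerankerBoostedScore` surfaces at query time, assuming an existing index with a semantic configuration; the endpoint, key, index, configuration name, and the `Hotel` document shape below are placeholders, and the query shape mirrors the one used in the 1220GA tests:

```ts
import { AzureKeyCredential, SearchClient } from "@azure/search-documents";

// Hypothetical document shape for illustration only.
interface Hotel {
  id: string;
  content: string;
}

async function main(): Promise<void> {
  // Placeholder endpoint, key, and index name.
  const client = new SearchClient<Hotel>(
    "https://<service>.search.windows.net",
    "<index-name>",
    new AzureKeyCredential("<api-key>"),
  );

  const searchResults = await client.search("luxury", {
    queryType: "semantic",
    semanticSearchOptions: { configurationName: "<semantic-config>" },
  });

  for await (const result of searchResults.results) {
    // rerankerBoostedScore is populated by the service only for queries of
    // type 'semantic'; whether results are ordered by it depends on the
    // semantic configuration's rankingOrder.
    console.log(result.score, result.rerankerScore, result.rerankerBoostedScore);
  }
}

main().catch(console.error);
```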
diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index 684f415a510e..ac144214fe45 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -63,6 +63,7 @@ import { LanguageDetectionSkill, LengthTokenFilter, LexicalAnalyzerName, + LexicalNormalizer as BaseLexicalNormalizer, LexicalTokenizerName, LimitTokenFilter, LuceneStandardAnalyzer, @@ -110,6 +111,7 @@ import { VectorSearchProfile, VectorSearchVectorizerKind, WordDelimiterTokenFilter, + CustomNormalizer, } from "./generated/service/models/index.js"; /** @@ -535,6 +537,11 @@ export type LexicalAnalyzer = | LuceneStandardAnalyzer | StopAnalyzer; +/** + * Contains the possible cases for LexicalNormalizer. + */ +export type LexicalNormalizer = BaseLexicalNormalizer | CustomNormalizer; + /** * A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call * your custom code. @@ -973,6 +980,10 @@ export interface SimpleField { * The encoding format to interpret the field contents. */ vectorEncodingFormat?: VectorEncodingFormat; + /** + * + */ + normalizerName?: string; } export function isComplexField(field: SearchField): field is ComplexField { @@ -1088,6 +1099,10 @@ export interface SearchIndex { * The tokenizers for the index. */ tokenizers?: LexicalTokenizer[]; + /** + * The normalizers for the index. + */ + normalizers?: LexicalNormalizer[]; /** * The token filters for the index. */ diff --git a/sdk/search/search-documents/src/serviceUtils.ts b/sdk/search/search-documents/src/serviceUtils.ts index 042ac23b2b86..04b4b1678a0e 100644 --- a/sdk/search/search-documents/src/serviceUtils.ts +++ b/sdk/search/search-documents/src/serviceUtils.ts @@ -246,7 +246,8 @@ export function convertFieldsToPublic(fields: GeneratedSearchField[]): SearchFie const type: SearchFieldDataType = field.type as SearchFieldDataType; const synonymMapNames: string[] | undefined = field.synonymMaps; - const { retrievable, analyzer, searchAnalyzer, indexAnalyzer, ...restField } = field; + const { retrievable, analyzer, searchAnalyzer, indexAnalyzer, normalizer, ...restField } = + field; const hidden = typeof retrievable === "boolean" ? !retrievable : retrievable; const result: SimpleField = { @@ -254,6 +255,7 @@ export function convertFieldsToPublic(fields: GeneratedSearchField[]): SearchFie type, hidden, analyzerName: analyzer, + normalizerName: normalizer, searchAnalyzerName: searchAnalyzer, indexAnalyzerName: indexAnalyzer, synonymMapNames, @@ -283,6 +285,7 @@ export function convertFieldsToGenerated(fields: SearchField[]): GeneratedSearch facetable: field.facetable ?? false, sortable: field.sortable ?? 
false, analyzer: field.analyzerName, + normalizer: field.normalizerName, searchAnalyzer: field.searchAnalyzerName, indexAnalyzer: field.indexAnalyzerName, synonymMaps: field.synonymMapNames, @@ -415,6 +418,7 @@ export function generatedIndexToPublicIndex(generatedIndex: GeneratedSearchIndex analyzers: convertAnalyzersToPublic(generatedIndex.analyzers), tokenizers: convertTokenizersToPublic(generatedIndex.tokenizers), tokenFilters: generatedIndex.tokenFilters as TokenFilter[], + normalizers: generatedIndex.normalizers, charFilters: generatedIndex.charFilters as CharFilter[], scoringProfiles: generatedIndex.scoringProfiles as ScoringProfile[], fields: convertFieldsToPublic(generatedIndex.fields), @@ -511,12 +515,14 @@ export function generatedSearchResultToPublicSearchResult< _highlights: highlights, _rerankerScore: rerankerScore, _captions: captions, + rerankerBoostedScore, ...restProps } = result; const obj = { score, highlights, rerankerScore, + rerankerBoostedScore, captions, document: restProps, }; From 65058b2d81229e0f46663cf40652448656402451 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 20:51:24 +0000 Subject: [PATCH 08/21] updates --- sdk/search/search-documents/package.json | 2 +- .../search-documents-browser.api.diff.md | 2 +- .../review/search-documents-node.api.md | 41 ++- .../test/public/node/1220GA.spec.ts | 252 +++++++++++++++++- 4 files changed, 282 insertions(+), 15 deletions(-) diff --git a/sdk/search/search-documents/package.json b/sdk/search/search-documents/package.json index d549c1a6a498..66498dff2447 100644 --- a/sdk/search/search-documents/package.json +++ b/sdk/search/search-documents/package.json @@ -21,7 +21,7 @@ "pack": "pnpm pack 2>&1", "test": "npm run test:node && npm run test:browser", "test:browser": "npm run clean && dev-tool run build-package && dev-tool run build-test && dev-tool run test:vitest --browser", - "test:node": "dev-tool run test:vitest --test-proxy-debug -- 1220GA", + "test:node": "dev-tool run test:vitest --test-proxy-debug", "update-snippets": "echo Skipped." }, "files": [ diff --git a/sdk/search/search-documents/review/search-documents-browser.api.diff.md b/sdk/search/search-documents/review/search-documents-browser.api.diff.md index cdfe41fb9894..6c1ddb60e4c8 100644 --- a/sdk/search/search-documents/review/search-documents-browser.api.diff.md +++ b/sdk/search/search-documents/review/search-documents-browser.api.diff.md @@ -7,7 +7,7 @@ For the complete API surface, see the corresponding -node.api.md file. 
=================================================================== --- NodeJS +++ browser -@@ -352,9 +352,9 @@ +@@ -358,9 +358,9 @@ // @public export type CreateSkillsetOptions = OperationOptions; diff --git a/sdk/search/search-documents/review/search-documents-node.api.md b/sdk/search/search-documents/review/search-documents-node.api.md index a879dcf02584..fd594e0243ef 100644 --- a/sdk/search/search-documents/review/search-documents-node.api.md +++ b/sdk/search/search-documents/review/search-documents-node.api.md @@ -133,6 +133,12 @@ export interface BaseLexicalAnalyzer { odatatype: "#Microsoft.Azure.Search.CustomAnalyzer" | "#Microsoft.Azure.Search.PatternAnalyzer" | "#Microsoft.Azure.Search.StandardAnalyzer" | "#Microsoft.Azure.Search.StopAnalyzer"; } +// @public +export interface BaseLexicalNormalizer { + name: string; + odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; +} + // @public export interface BaseLexicalTokenizer { name: string; @@ -405,6 +411,13 @@ export interface CustomEntityLookupSkill extends BaseSearchIndexerSkill { // @public (undocumented) export type CustomEntityLookupSkillLanguage = `${KnownCustomEntityLookupSkillLanguage}`; +// @public +export interface CustomNormalizer extends BaseLexicalNormalizer { + charFilters?: CharFilterName[]; + odatatype: "#Microsoft.Azure.Search.CustomNormalizer"; + tokenFilters?: TokenFilterName[]; +} + // @public export type DataChangeDetectionPolicy = HighWaterMarkChangeDetectionPolicy | SqlIntegratedChangeTrackingPolicy; @@ -1125,6 +1138,15 @@ export enum KnownKeyPhraseExtractionSkillLanguage { Sv = "sv" } +// @public +export enum KnownLexicalNormalizerNames { + AsciiFolding = "asciifolding", + Elision = "elision", + Lowercase = "lowercase", + Standard = "standard", + Uppercase = "uppercase" +} + // @public export enum KnownOcrLineEnding { CarriageReturn = "carriageReturn", @@ -1313,6 +1335,12 @@ export enum KnownPIIDetectionSkillMaskingMode { Replace = "replace" } +// @public +export enum KnownRankingOrder { + BoostedRerankerScore = "BoostedRerankerScore", + ReRankerScore = "RerankerScore" +} + // @public export enum KnownRegexFlags { CanonEq = "CANON_EQ", @@ -1585,7 +1613,7 @@ export enum KnownVectorFilterMode { // @public export enum KnownVectorQueryKind { - $DO_NOT_NORMALIZE$_text = "text", + Text = "text", Vector = "vector" } @@ -1651,6 +1679,12 @@ export type LexicalAnalyzer = CustomAnalyzer | PatternAnalyzer | LuceneStandardA // @public export type LexicalAnalyzerName = string; +// @public +export type LexicalNormalizer = BaseLexicalNormalizer | CustomNormalizer; + +// @public +export type LexicalNormalizerName = string; + // @public export type LexicalTokenizer = ClassicTokenizer | EdgeNGramTokenizer | KeywordTokenizer | MicrosoftLanguageTokenizer | MicrosoftLanguageStemmingTokenizer | NGramTokenizer | PathHierarchyTokenizer | PatternTokenizer | LuceneStandardTokenizer | UaxUrlEmailTokenizer; @@ -2006,10 +2040,12 @@ export interface SearchIndex { charFilters?: CharFilter[]; corsOptions?: CorsOptions; defaultScoringProfile?: string; + description?: string; encryptionKey?: SearchResourceEncryptionKey; etag?: string; fields: SearchField[]; name: string; + normalizers?: LexicalNormalizer[]; scoringProfiles?: ScoringProfile[]; semanticSearch?: SemanticSearch; similarity?: SimilarityAlgorithm; @@ -2359,6 +2395,7 @@ export interface SearchResourceEncryptionKey { export type SearchResult = SelectFields> = { readonly score: number; readonly rerankerScore?: number; + readonly rerankerBoostedScore?: number; readonly highlights?: { 
[k in SelectFields]?: string[]; }; @@ -2504,6 +2541,8 @@ export interface SimpleField { indexAnalyzerName?: LexicalAnalyzerName; key?: boolean; name: string; + // (undocumented) + normalizerName?: string; searchable?: boolean; searchAnalyzerName?: LexicalAnalyzerName; sortable?: boolean; diff --git a/sdk/search/search-documents/test/public/node/1220GA.spec.ts b/sdk/search/search-documents/test/public/node/1220GA.spec.ts index 22b2daf2a533..c6aa4abcd80c 100644 --- a/sdk/search/search-documents/test/public/node/1220GA.spec.ts +++ b/sdk/search/search-documents/test/public/node/1220GA.spec.ts @@ -1,27 +1,48 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -import { +import type { VectorSearch, SearchField, SearchIndex, - IndexDocumentsClient, - SearchClient, + HnswParameters, + LexicalNormalizer, + CustomNormalizer, +} from "@azure/search-documents"; +import { SearchIndexClient, AzureKeyCredential, SearchIndexerClient, + SearchClient, + KnownRankingOrder, } from "@azure/search-documents"; -import { describe, beforeEach, it, expect } from "vitest"; -import config from "dotenv/config"; -import { afterEach } from "node:test"; +import { describe, it, expect, afterEach } from "vitest"; +import type { PipelinePolicy } from "@azure/core-rest-pipeline"; // Test index names (prefixed to avoid conflicts) const credential = new AzureKeyCredential(process.env.API_KEY!); const indexClient = new SearchIndexClient(process.env.ENDPOINT!, credential); -const indexerClient = new SearchIndexerClient(process.env.ENDPOINT!, credential); -describe("1220GA", () => { +const debugPolicy: PipelinePolicy = { + name: "debugPolicy", + async sendRequest(request, next) { + console.log("Request:"); + console.log(JSON.stringify(request, null, 2)); + const response = await next(request); + console.log("Response:"); + console.log(JSON.stringify(response, null, 2)); + return response; + }, +}; + +describe.skip("1220GA", () => { + // Using this file to live test new features locally + afterEach(async () => { + for await (const index of indexClient.listIndexes()) { + await indexClient.deleteIndex(index.name); + } + }); it("Vector Search HNSW storage optimization", async () => { const vectorSearch: VectorSearch = { algorithms: [ @@ -77,10 +98,8 @@ describe("1220GA", () => { vectorSearch, }; - console.log("Creating index with vector compression..."); await indexClient.createIndex(index); - console.log("Retrieving index to verify compression settings..."); const retrievedIndex = await indexClient.getIndex(index.name); // Verify compression settings @@ -89,7 +108,11 @@ describe("1220GA", () => { expect(compression?.rescoringOptions?.enableRescoring).toBe(true); expect(compression?.rescoringOptions?.defaultOversampling).toBe(10.0); - await indexClient.deleteIndex(index.name); + // Verify algorithm settings + const parameters = retrievedIndex.vectorSearch?.algorithms?.[0].parameters as HnswParameters; + expect(parameters.m).toBe(4); + expect(parameters.efConstruction).toBe(400); + expect(parameters.efSearch).toBe(500); }); it("Search Index description", async () => { @@ -108,6 +131,211 @@ describe("1220GA", () => { }); const retrievedIndex = await indexClient.getIndex(indexName); expect(retrievedIndex.description).toBe("Test index with description"); - await indexClient.deleteIndex(indexName); + }); + + it("Semantic Scoring Profiles", async () => { + const indexName = `${Date.now()}-semantic`; + + const fields: SearchField[] = [ + { name: "id", type: "Edm.String", key: true, searchable: false }, + { name: 
"content", type: "Edm.String", searchable: true }, + { + name: "embedding", + type: "Collection(Edm.Single)", + searchable: true, + vectorSearchDimensions: 1024, + vectorSearchProfileName: "vector-profile", + }, + ]; + + console.log("Creating index with semantic configurations..."); + // indexClient["client"].pipeline.addPolicy(debugPolicy); + await indexClient.createIndex({ + name: indexName, + fields, + vectorSearch: { + algorithms: [ + { + name: "hnsw-config", + kind: "hnsw", + parameters: { + metric: "cosine", + m: 4, + efConstruction: 400, + efSearch: 500, + }, + }, + ], + profiles: [ + { + name: "vector-profile", + algorithmConfigurationName: "hnsw-config", + }, + ], + }, + semanticSearch: { + configurations: [ + { + name: "semantic-config-1", + prioritizedFields: { + titleField: { name: "content" }, + contentFields: [{ name: "content" }], + }, + rankingOrder: KnownRankingOrder.BoostedRerankerScore, // New feature: ranking order + }, + { + name: "semantic-config-2", + prioritizedFields: { + titleField: { name: "content" }, + contentFields: [{ name: "content" }], + }, + rankingOrder: KnownRankingOrder.ReRankerScore, // Different ranking order + }, + ], + }, + }); + + const searchClient = new SearchClient(process.env.ENDPOINT!, indexName, credential); + + const testDoc = { + id: "semantic-doc-1", + content: "This is a test document for semantic search with Azure Cognitive Search", + embedding: Array(1024) + .fill(0) + .map(() => Math.random()), + }; + + console.log("Uploading document..."); + // searchClient["client"].pipeline.addPolicy(debugPolicy); + await searchClient.uploadDocuments([testDoc]); + + await new Promise((resolve) => setTimeout(resolve, 2000)); // Wait for indexing + + console.log("Performing semantic search with configuration 'semantic-config-1'..."); + const semanticResults = await searchClient.search("semantic search", { + semanticSearchOptions: { + configurationName: "semantic-config-1", + }, + queryType: "semantic", + }); + + for await (const result of semanticResults.results) { + console.log(result); + } + }); + + it("Multi-Vectors", () => { + const fields: SearchField[] = [ + { name: "id", type: "Edm.String", key: true, searchable: false }, + { name: "content", type: "Edm.String", searchable: true }, + { + name: "embedding1", + type: "Collection(Edm.Single)", + searchable: true, + vectorSearchDimensions: 1024, + vectorSearchProfileName: "vector-profile", + }, + { + name: "embedding2", + type: "Collection(Edm.Single)", + searchable: true, + vectorSearchDimensions: 512, + vectorSearchProfileName: "vector-profile", + }, + ]; + const vectorSearch: VectorSearch = { + algorithms: [ + { + name: "hnsw-config", + kind: "hnsw", + parameters: { + metric: "cosine", + m: 4, + efConstruction: 400, + efSearch: 500, + }, + }, + ], + profiles: [ + { + name: "vector-profile", + algorithmConfigurationName: "hnsw-config", + }, + ], + }; + + const index: SearchIndex = { + name: `${Date.now()}-multi-vectors`, + fields, + vectorSearch, + }; + + expect(index.fields.length).toBe(4); + }); + it("Normalizers", async () => { + const normalizers: LexicalNormalizer[] = [ + { + name: "custom-normalizer", + odatatype: "#Microsoft.Azure.Search.CustomNormalizer", + charFilters: [], + tokenFilters: ["lowercase", "asciifolding"], + }, + ]; + + const fields: SearchField[] = [ + { name: "id", type: "Edm.String", key: true, searchable: false }, + { + name: "title", + type: "Edm.String", + searchable: true, + filterable: true, // Required for normalizer + normalizerName: "lowercase", // Use built-in 
normalizer + }, + { + name: "description", + type: "Edm.String", + searchable: true, + sortable: true, // Required for normalizer + normalizerName: "custom-normalizer", + }, + ]; + + const normalizerIndex: SearchIndex = { + name: `${Date.now()}-normalizers`, + description: "Test index for normalizer functionality", + fields, + normalizers, + }; + + console.log("Creating index with normalizers..."); + console.log("Normalizers being sent:", JSON.stringify(normalizers, null, 2)); + console.log( + "Fields being sent:", + JSON.stringify( + fields.map((f) => ({ name: f.name, normalizer: (f as any).normalizerName || null })), + null, + 2, + ), + ); + await indexClient.createIndex(normalizerIndex); + + const retrievedIndex = await indexClient.getIndex(normalizerIndex.name); + console.log("Retrieved index:", JSON.stringify(retrievedIndex, null, 2)); + + // Verify normalizer settings + const retrievedNormalizers = retrievedIndex.normalizers || []; + expect(retrievedNormalizers.length).toBe(1); + expect(retrievedNormalizers[0].name).toBe("custom-normalizer"); + expect(retrievedNormalizers[0].odatatype).toBe("#Microsoft.Azure.Search.CustomNormalizer"); + const tokenFilters = (retrievedNormalizers[0] as CustomNormalizer).tokenFilters; + expect(tokenFilters).toContain("lowercase"); + expect(tokenFilters).toContain("asciifolding"); + + // Verify field normalizer assignments + const titleField = retrievedIndex.fields.find((f) => f.name === "title"); + expect((titleField as any).normalizerName).toBe("lowercase"); + + const descriptionField = retrievedIndex.fields.find((f) => f.name === "description"); + expect((descriptionField as any).normalizerName).toBe("custom-normalizer"); }); }); From c19c5289b255d4101b261ed7b9f4b13472003c5a Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 20:54:07 +0000 Subject: [PATCH 09/21] fixup! updates --- .../search-documents/review/search-documents-node.api.md | 1 - sdk/search/search-documents/src/serviceModels.ts | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/search/search-documents/review/search-documents-node.api.md b/sdk/search/search-documents/review/search-documents-node.api.md index fd594e0243ef..9f19953f2085 100644 --- a/sdk/search/search-documents/review/search-documents-node.api.md +++ b/sdk/search/search-documents/review/search-documents-node.api.md @@ -2541,7 +2541,6 @@ export interface SimpleField { indexAnalyzerName?: LexicalAnalyzerName; key?: boolean; name: string; - // (undocumented) normalizerName?: string; searchable?: boolean; searchAnalyzerName?: LexicalAnalyzerName; diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index ac144214fe45..42ec54e59d01 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -980,8 +980,9 @@ export interface SimpleField { * The encoding format to interpret the field contents. */ vectorEncodingFormat?: VectorEncodingFormat; - /** - * + /** The name of the normalizer to use for the field. This option can be used only with fields with + * filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed + * for the field. Must be null for complex fields. 
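+ * For example (an illustrative field definition): `{ name: "title", type: "Edm.String", filterable: true, normalizerName: "lowercase" }` applies the built-in lowercase normalizer.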
*/ normalizerName?: string; } From 66be9070a58d686b7b8969f32b8d22db4fea9279 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 21:04:29 +0000 Subject: [PATCH 10/21] changelog and versions --- sdk/search/search-documents/CHANGELOG.md | 15 +++++++++++++++ sdk/search/search-documents/package.json | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/sdk/search/search-documents/CHANGELOG.md b/sdk/search/search-documents/CHANGELOG.md index 89da8343159c..5951dc33db84 100644 --- a/sdk/search/search-documents/CHANGELOG.md +++ b/sdk/search/search-documents/CHANGELOG.md @@ -1,5 +1,20 @@ # Release History +## 12.2.0 (Unreleased) + +### Features Added + +- Added support for the `2025-09-01` service version. + - Support for running `VectorQuery`s against sub-fields of complex fields. + - Support for reranker boosted scores in search results and the ability to sort results on either reranker or reranker + boosted scores in `SemanticConfiguration.rankingOrder`. + - Support for `VectorSearchCompression.rescoringOptions` to configure how vector compression handles the original + vector when indexing and how vectors are used during rescoring. + - Added `SearchIndex.description` to provide a textual description of the index. + - Support for `LexicalNormalizer` when defining a `SearchIndex` or `SimpleField`, and the ability to + use it when analyzing text with `SearchIndexClient.analyzeText`. + - Support for the `DocumentIntelligenceLayoutSkill` skillset skill and the `OneLake` `SearchIndexerDataSourceConnection` data source. + ## 12.2.0-beta.2 (2024-11-25) ### Features Added diff --git a/sdk/search/search-documents/package.json b/sdk/search/search-documents/package.json index 66498dff2447..04e1df66d211 100644 --- a/sdk/search/search-documents/package.json +++ b/sdk/search/search-documents/package.json @@ -1,6 +1,6 @@ { "name": "@azure/search-documents", - "version": "12.2.0-beta.2", + "version": "12.2.0", "description": "Azure client library to use Cognitive Search for node.js and browser.", "sdk-type": "client", "main": "./dist/commonjs/index.js", From ac1425578d5b7e2f2aa6f1ad0117f2b33f57a8ce Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 21:40:46 +0000 Subject: [PATCH 11/21] restore tests and such --- sdk/search/search-documents/eslint.config.mjs | 3 +++ .../search-documents/test/public/utils/recordedClient.ts | 9 +++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/sdk/search/search-documents/eslint.config.mjs b/sdk/search/search-documents/eslint.config.mjs index 197f0c237a9d..53ff06d561e4 100644 --- a/sdk/search/search-documents/eslint.config.mjs +++ b/sdk/search/search-documents/eslint.config.mjs @@ -2,6 +2,9 @@ import azsdkEslint from "@azure/eslint-plugin-azure-sdk"; export default azsdkEslint.config([ { + rules: { + "@azure/azure-sdk/ts-naming-options": "warn", + }, files: ["**/*.ts", "**/*.cts", "**/*.mts"], languageOptions: { parserOptions: { diff --git a/sdk/search/search-documents/test/public/utils/recordedClient.ts b/sdk/search/search-documents/test/public/utils/recordedClient.ts index 5ee282afb53b..9851f64707ad 100644 --- a/sdk/search/search-documents/test/public/utils/recordedClient.ts +++ b/sdk/search/search-documents/test/public/utils/recordedClient.ts @@ -97,9 +97,10 @@ export async function createClients( indexName = recorder.variable("TEST_INDEX_NAME", indexName); - const credential = new AzureKeyCredential(assertEnvironmentVariable("API_KEY")); + const credential =
createTestCredential(); const endPoint: string = assertEnvironmentVariable("ENDPOINT"); + const openAIEndpoint = assertEnvironmentVariable("AZURE_OPENAI_ENDPOINT"); const searchClient = new SearchClient( endPoint, @@ -123,7 +124,11 @@ export async function createClients( serviceVersion, }), ); - const openAIClient = new OpenAIClient("", credential, recorder.configureClientOptions({})); + const openAIClient = new OpenAIClient( + openAIEndpoint, + credential, + recorder.configureClientOptions({}), + ); return { searchClient, From ef2063421b44eb99a38a6c835a726806a70d2639 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 22:05:41 +0000 Subject: [PATCH 12/21] cleanup --- sdk/search/search-documents/src/serviceModels.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index 42ec54e59d01..59a8a5b10d89 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -1046,7 +1046,6 @@ export interface SynonymMap { * as needed during iteration. Use .byPage() to make one request to the server * per iteration. */ -// eslint-disable-next-line @typescript-eslint/ban-types export type IndexIterator = PagedAsyncIterableIterator; /** @@ -1054,7 +1053,6 @@ export type IndexIterator = PagedAsyncIterableIterator; /** From ef27c8cdacd5bb2f045b4c209a37b6aa17154e7c Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 22:12:35 +0000 Subject: [PATCH 13/21] wip --- .../search-documents/test/public/node/1220GA.spec.ts | 2 -- .../test/public/node/searchClient.spec.ts | 10 +++++----- .../test/public/node/searchIndexClient.spec.ts | 4 ++-- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/sdk/search/search-documents/test/public/node/1220GA.spec.ts b/sdk/search/search-documents/test/public/node/1220GA.spec.ts index c6aa4abcd80c..c56730a979b5 100644 --- a/sdk/search/search-documents/test/public/node/1220GA.spec.ts +++ b/sdk/search/search-documents/test/public/node/1220GA.spec.ts @@ -19,8 +19,6 @@ import { import { describe, it, expect, afterEach } from "vitest"; import type { PipelinePolicy } from "@azure/core-rest-pipeline"; -// Test index names (prefixed to avoid conflicts) - const credential = new AzureKeyCredential(process.env.API_KEY!); const indexClient = new SearchIndexClient(process.env.ENDPOINT!, credential); diff --git a/sdk/search/search-documents/test/public/node/searchClient.spec.ts b/sdk/search/search-documents/test/public/node/searchClient.spec.ts index db6f1825de45..75bbb7d0238c 100644 --- a/sdk/search/search-documents/test/public/node/searchClient.spec.ts +++ b/sdk/search/search-documents/test/public/node/searchClient.spec.ts @@ -65,7 +65,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { }); // TODO: the preview-only tests are mixed in here when they should be in another describe (and removed in the stable release branch) - describe("stable", () => { + describe("stable", { skip: true }, () => { let recorder: Recorder; let searchClient: SearchClient; let indexClient: SearchIndexClient; @@ -104,7 +104,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { }, }) as const; - it.skip("search with speller", async () => { + it("search with speller", async () => { const searchResults = await searchClient.search("budjet", { skip: 0, top: 5, @@ -115,7 +115,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(searchResults.count, 6); }); - it.skip("search with semantic ranking", async () => { 
+ it("search with semantic ranking", async () => { const searchResults = await searchClient.search("luxury", { ...baseSemanticOptions(), skip: 0, @@ -125,7 +125,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { assert.equal(searchResults.count, 1); }); - it.skip("search with document debug info", async () => { + it("search with document debug info", async () => { const baseOptions = baseSemanticOptions(); const options = { ...baseOptions, @@ -169,7 +169,7 @@ describe("SearchClient", { timeout: 20_000 }, () => { } }); - it.skip("search with answers", async () => { + it("search with answers", async () => { const baseOptions = baseSemanticOptions(); const options = { ...baseOptions, diff --git a/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts b/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts index 36dc8f33c62b..b3d34cfc1c5e 100644 --- a/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts +++ b/sdk/search/search-documents/test/public/node/searchIndexClient.spec.ts @@ -62,7 +62,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { }); }); - describe("stable", () => { + describe("stable", { skip: true }, () => { let recorder: Recorder; let indexClient: SearchIndexClient; let TEST_INDEX_NAME: string; @@ -264,7 +264,7 @@ describe("SearchIndexClient", { timeout: 20_000 }, () => { }); }); - it.skip("creates the index object vector fields", async () => { + it("creates the index object vector fields", async () => { const indexName: string = isLiveMode() ? createRandomIndexName() : "hotel-live-test4"; const algorithm: VectorSearchAlgorithmConfiguration = { From 6baff659ab7151cc9039d72b7f3cbb68469d14ce Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Wed, 1 Oct 2025 22:29:35 +0000 Subject: [PATCH 14/21] wip --- sdk/search/search-documents/test/public/node/1220GA.spec.ts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/sdk/search/search-documents/test/public/node/1220GA.spec.ts b/sdk/search/search-documents/test/public/node/1220GA.spec.ts index c56730a979b5..34a10987fdab 100644 --- a/sdk/search/search-documents/test/public/node/1220GA.spec.ts +++ b/sdk/search/search-documents/test/public/node/1220GA.spec.ts @@ -19,9 +19,6 @@ import { import { describe, it, expect, afterEach } from "vitest"; import type { PipelinePolicy } from "@azure/core-rest-pipeline"; -const credential = new AzureKeyCredential(process.env.API_KEY!); -const indexClient = new SearchIndexClient(process.env.ENDPOINT!, credential); - const debugPolicy: PipelinePolicy = { name: "debugPolicy", async sendRequest(request, next) { @@ -35,6 +32,8 @@ const debugPolicy: PipelinePolicy = { }; describe.skip("1220GA", () => { + const credential = new AzureKeyCredential(process.env.API_KEY! ?? 
"api-key"); + const indexClient = new SearchIndexClient(process.env.ENDPOINT!, credential); // Using this file to live test new features locally afterEach(async () => { for await (const index of indexClient.listIndexes()) { From 48163d27f36a3afa6eea9cbb088b62e67f46d69d Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Fri, 3 Oct 2025 16:17:09 +0000 Subject: [PATCH 15/21] update swagger, fix transform --- .../search-documents-browser.api.diff.md | 2 +- .../review/search-documents-node.api.md | 2 ++ .../src/generated/service/models/index.ts | 4 ++++ .../src/generated/service/models/mappers.ts | 14 +++++++++++++ sdk/search/search-documents/swagger/Data.md | 10 ++++++++-- .../search-documents/swagger/Service.md | 20 +++++++++++++++++++ 6 files changed, 49 insertions(+), 3 deletions(-) diff --git a/sdk/search/search-documents/review/search-documents-browser.api.diff.md b/sdk/search/search-documents/review/search-documents-browser.api.diff.md index 6c1ddb60e4c8..5835636e9eed 100644 --- a/sdk/search/search-documents/review/search-documents-browser.api.diff.md +++ b/sdk/search/search-documents/review/search-documents-browser.api.diff.md @@ -7,7 +7,7 @@ For the complete API surface, see the corresponding -node.api.md file. =================================================================== --- NodeJS +++ browser -@@ -358,9 +358,9 @@ +@@ -360,9 +360,9 @@ // @public export type CreateSkillsetOptions = OperationOptions; diff --git a/sdk/search/search-documents/review/search-documents-node.api.md b/sdk/search/search-documents/review/search-documents-node.api.md index 9f19953f2085..e6ad8ef49fa0 100644 --- a/sdk/search/search-documents/review/search-documents-node.api.md +++ b/sdk/search/search-documents/review/search-documents-node.api.md @@ -216,7 +216,9 @@ export interface BaseVectorSearchAlgorithmConfiguration { // @public export interface BaseVectorSearchCompression { compressionName: string; + defaultOversampling?: number; kind: "scalarQuantization" | "binaryQuantization"; + rerankWithOriginalVectors?: boolean; // Warning: (ae-forgotten-export) The symbol "RescoringOptions" needs to be exported by the entry point index.d.ts rescoringOptions?: RescoringOptions; truncationDimension?: number; diff --git a/sdk/search/search-documents/src/generated/service/models/index.ts b/sdk/search/search-documents/src/generated/service/models/index.ts index 6acebc8ac184..5d8016ceafd5 100644 --- a/sdk/search/search-documents/src/generated/service/models/index.ts +++ b/sdk/search/search-documents/src/generated/service/models/index.ts @@ -1001,6 +1001,10 @@ export interface VectorSearchCompression { rescoringOptions?: RescoringOptions; /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should be only used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */ truncationDimension?: number; + /** If set to true, once the ordered set of results calculated using compressed vectors are obtained, they will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */ + rerankWithOriginalVectors?: boolean; + /** Default oversampling factor. 
Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */ + defaultOversampling?: number; } /** Contains the options for rescoring. */ diff --git a/sdk/search/search-documents/src/generated/service/models/mappers.ts b/sdk/search/search-documents/src/generated/service/models/mappers.ts index ad03ad73887f..89363adaaf38 100644 --- a/sdk/search/search-documents/src/generated/service/models/mappers.ts +++ b/sdk/search/search-documents/src/generated/service/models/mappers.ts @@ -2436,6 +2436,20 @@ export const VectorSearchCompression: coreClient.CompositeMapper = { name: "Number", }, }, + rerankWithOriginalVectors: { + serializedName: "rerankWithOriginalVectors", + nullable: true, + type: { + name: "Boolean", + }, + }, + defaultOversampling: { + serializedName: "defaultOversampling", + nullable: true, + type: { + name: "Number", + }, + }, }, }, }; diff --git a/sdk/search/search-documents/swagger/Data.md b/sdk/search/search-documents/swagger/Data.md index 73ef30e2f4c6..c7b543914b06 100644 --- a/sdk/search/search-documents/swagger/Data.md +++ b/sdk/search/search-documents/swagger/Data.md @@ -71,13 +71,19 @@ modelerfour: ActionType: $DO_NOT_NORMALIZE$__actionType ``` -### Change text to \_text in SuggestResult +### Change text to _text only in SuggestResult ```yaml +directive: + - from: swagger-document + where: $.definitions.SuggestResult.properties["@search.text"] + transform: > + $["x-ms-client-name"] = "SuggestResult_text"; + modelerfour: naming: override: - Text: $DO_NOT_NORMALIZE$_text + SuggestResult_text: $DO_NOT_NORMALIZE$_text ``` ### Preserve underscore prefix in some result type properties diff --git a/sdk/search/search-documents/swagger/Service.md b/sdk/search/search-documents/swagger/Service.md index bf0efe8bd51a..0bf2e474b87f 100644 --- a/sdk/search/search-documents/swagger/Service.md +++ b/sdk/search/search-documents/swagger/Service.md @@ -507,3 +507,23 @@ directive: where: $.definitions.WebApiSkill.properties.authResourceId transform: $["x-ms-format"] = "arm-id"; ``` + +### Retain `rerankWithOriginalVectors` and `defaultOversampling` in `VectorSearchCompression` + +```yaml +directive: +- from: swagger-document + where: $.definitions.VectorSearchCompressionConfiguration + transform: > + $.properties.rerankWithOriginalVectors = { + "type": "boolean", + "description": "If set to true, once the ordered set of results calculated using compressed vectors are obtained, they will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency.", + "x-nullable": true + }; + $.properties.defaultOversampling = { + "type": "number", + "format": "double", + "description": "Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. 
Higher values improve recall at the expense of latency.", + "x-nullable": true + }; +``` From accc0445e309055002b04ef1cec18b0a867f7001 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Fri, 3 Oct 2025 16:30:18 +0000 Subject: [PATCH 16/21] fixes --- .../search-documents/test/public/node/1220GA.spec.ts | 3 ++- sdk/search/search-documents/test/public/typeDefinitions.ts | 3 +-- .../search-documents/test/public/utils/recordedClient.ts | 7 +------ sdk/search/search-documents/test/public/utils/setup.ts | 4 ++-- 4 files changed, 6 insertions(+), 11 deletions(-) diff --git a/sdk/search/search-documents/test/public/node/1220GA.spec.ts b/sdk/search/search-documents/test/public/node/1220GA.spec.ts index 34a10987fdab..759c7da4fbf8 100644 --- a/sdk/search/search-documents/test/public/node/1220GA.spec.ts +++ b/sdk/search/search-documents/test/public/node/1220GA.spec.ts @@ -18,6 +18,7 @@ import { } from "@azure/search-documents"; import { describe, it, expect, afterEach } from "vitest"; import type { PipelinePolicy } from "@azure/core-rest-pipeline"; +import { isLiveMode } from "@azure-tools/test-recorder"; const debugPolicy: PipelinePolicy = { name: "debugPolicy", @@ -31,7 +32,7 @@ const debugPolicy: PipelinePolicy = { }, }; -describe.skip("1220GA", () => { +describe.skipIf(!isLiveMode())("1220GA", () => { const credential = new AzureKeyCredential(process.env.API_KEY! ?? "api-key"); const indexClient = new SearchIndexClient(process.env.ENDPOINT!, credential); // Using this file to live test new features locally diff --git a/sdk/search/search-documents/test/public/typeDefinitions.ts b/sdk/search/search-documents/test/public/typeDefinitions.ts index 7bb7816d0255..7baf43d1ea33 100644 --- a/sdk/search/search-documents/test/public/typeDefinitions.ts +++ b/sdk/search/search-documents/test/public/typeDefinitions.ts @@ -79,7 +79,6 @@ type BlobIndexerParsingMode = | "json" | "jsonArray" | "jsonLines" - | "markdown" | "text"; type BlobIndexerPDFTextRotationAlgorithm = "detectAngles" | "none"; type CustomEntityLookupSkillLanguage = "da" | "de" | "en" | "es" | "fi" | "fr" | "it" | "ko" | "pt"; @@ -525,7 +524,7 @@ type TextTranslationSkillLanguage = | "zh-Hans" | "zh-Hant"; type VectorFilterMode = "postFilter" | "preFilter"; -type VectorQueryKind = "imageBinary" | "imageUrl" | "text" | "vector"; +type VectorQueryKind = "text" | "vector"; type VectorSearchAlgorithmKind = "exhaustiveKnn" | "hnsw"; type VectorSearchAlgorithmMetric = "cosine" | "dotProduct" | "euclidean" | "hamming"; type VisualFeature = diff --git a/sdk/search/search-documents/test/public/utils/recordedClient.ts b/sdk/search/search-documents/test/public/utils/recordedClient.ts index 9851f64707ad..0132e8edc71b 100644 --- a/sdk/search/search-documents/test/public/utils/recordedClient.ts +++ b/sdk/search/search-documents/test/public/utils/recordedClient.ts @@ -6,12 +6,7 @@ import type { Recorder, RecorderStartOptions, SanitizerOptions } from "@azure-to import { assertEnvironmentVariable, env } from "@azure-tools/test-recorder"; import { isDefined } from "@azure/core-util"; import { OpenAIClient } from "@azure/openai"; -import { - AzureKeyCredential, - SearchClient, - SearchIndexClient, - SearchIndexerClient, -} from "../../../src/index.js"; +import { SearchClient, SearchIndexClient, SearchIndexerClient } from "../../../src/index.js"; export interface Clients { searchClient: SearchClient; diff --git a/sdk/search/search-documents/test/public/utils/setup.ts b/sdk/search/search-documents/test/public/utils/setup.ts index f9602918a80e..fbe9553aa5ee 100644 --- 
a/sdk/search/search-documents/test/public/utils/setup.ts +++ b/sdk/search/search-documents/test/public/utils/setup.ts @@ -33,8 +33,8 @@ export async function createIndex( kind: "azureOpenAI", vectorizerName: "vector-search-vectorizer", parameters: { - // deploymentId: assertEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME"), - // resourceUrl: assertEnvironmentVariable("AZURE_OPENAI_ENDPOINT"), + deploymentId: assertEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME"), + resourceUrl: assertEnvironmentVariable("AZURE_OPENAI_ENDPOINT"), modelName: "text-embedding-ada-002", }, }, From a6534a0a4af751c6e47b83ace21e273e6250e85c Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Fri, 3 Oct 2025 18:56:57 +0000 Subject: [PATCH 17/21] copilot comments --- .../search-documents/review/search-documents-node.api.md | 8 +++++--- .../src/generated/service/models/index.ts | 2 +- sdk/search/search-documents/src/index.ts | 1 + sdk/search/search-documents/src/serviceModels.ts | 3 ++- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/sdk/search/search-documents/review/search-documents-node.api.md b/sdk/search/search-documents/review/search-documents-node.api.md index e6ad8ef49fa0..25746b6bf5ed 100644 --- a/sdk/search/search-documents/review/search-documents-node.api.md +++ b/sdk/search/search-documents/review/search-documents-node.api.md @@ -1340,7 +1340,7 @@ export enum KnownPIIDetectionSkillMaskingMode { // @public export enum KnownRankingOrder { BoostedRerankerScore = "BoostedRerankerScore", - ReRankerScore = "RerankerScore" + RerankerScore = "RerankerScore" } // @public @@ -1929,6 +1929,9 @@ export interface QueryCaptionResult { // @public export type QueryType = "simple" | "full" | "semantic"; +// @public +export type RankingOrder = string; + // @public (undocumented) export type RegexFlags = `${KnownRegexFlags}`; @@ -2430,7 +2433,6 @@ export type SelectFields = (() => T extends TModel ? 
t export interface SemanticConfiguration { name: string; prioritizedFields: SemanticPrioritizedFields; - // Warning: (ae-forgotten-export) The symbol "RankingOrder" needs to be exported by the entry point index.d.ts rankingOrder?: RankingOrder; } @@ -2543,7 +2545,7 @@ export interface SimpleField { indexAnalyzerName?: LexicalAnalyzerName; key?: boolean; name: string; - normalizerName?: string; + normalizerName?: LexicalNormalizerName; searchable?: boolean; searchAnalyzerName?: LexicalAnalyzerName; sortable?: boolean; diff --git a/sdk/search/search-documents/src/generated/service/models/index.ts b/sdk/search/search-documents/src/generated/service/models/index.ts index 5d8016ceafd5..0cdca08edece 100644 --- a/sdk/search/search-documents/src/generated/service/models/index.ts +++ b/sdk/search/search-documents/src/generated/service/models/index.ts @@ -2766,7 +2766,7 @@ export enum KnownRankingOrder { /** Sets sort order as BoostedRerankerScore */ BoostedRerankerScore = "BoostedRerankerScore", /** Sets sort order as ReRankerScore */ - ReRankerScore = "RerankerScore", + RerankerScore = "RerankerScore", } /** diff --git a/sdk/search/search-documents/src/index.ts b/sdk/search/search-documents/src/index.ts index 4d9110f2027c..9aaa43467501 100644 --- a/sdk/search/search-documents/src/index.ts +++ b/sdk/search/search-documents/src/index.ts @@ -130,6 +130,7 @@ export { PatternReplaceTokenFilter, PhoneticEncoder, PhoneticTokenFilter, + RankingOrder, ResourceCounter, ScalarQuantizationCompression, ScalarQuantizationParameters, diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index 59a8a5b10d89..b62d9cfb3441 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -112,6 +112,7 @@ import { VectorSearchVectorizerKind, WordDelimiterTokenFilter, CustomNormalizer, + LexicalNormalizerName, } from "./generated/service/models/index.js"; /** @@ -984,7 +985,7 @@ export interface SimpleField { * filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed * for the field. Must be null for complex fields. */ - normalizerName?: string; + normalizerName?: LexicalNormalizerName; } export function isComplexField(field: SearchField): field is ComplexField { From 812a1086c7c35270f497043910ac3b1217fff593 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Fri, 3 Oct 2025 19:11:27 +0000 Subject: [PATCH 18/21] feedback, missing exports --- .../search-documents-browser.api.diff.md | 2 +- .../review/search-documents-node.api.md | 77 ++++++++++++++++++- sdk/search/search-documents/src/index.ts | 28 +++++-- .../search-documents/src/serviceModels.ts | 12 +-- 4 files changed, 104 insertions(+), 15 deletions(-) diff --git a/sdk/search/search-documents/review/search-documents-browser.api.diff.md b/sdk/search/search-documents/review/search-documents-browser.api.diff.md index 5835636e9eed..dfaf7f3ddb3e 100644 --- a/sdk/search/search-documents/review/search-documents-browser.api.diff.md +++ b/sdk/search/search-documents/review/search-documents-browser.api.diff.md @@ -7,7 +7,7 @@ For the complete API surface, see the corresponding -node.api.md file. 
=================================================================== --- NodeJS +++ browser -@@ -360,9 +360,9 @@ +@@ -359,9 +359,9 @@ // @public export type CreateSkillsetOptions = OperationOptions; diff --git a/sdk/search/search-documents/review/search-documents-node.api.md b/sdk/search/search-documents/review/search-documents-node.api.md index 25746b6bf5ed..aaeb645911af 100644 --- a/sdk/search/search-documents/review/search-documents-node.api.md +++ b/sdk/search/search-documents/review/search-documents-node.api.md @@ -219,7 +219,6 @@ export interface BaseVectorSearchCompression { defaultOversampling?: number; kind: "scalarQuantization" | "binaryQuantization"; rerankWithOriginalVectors?: boolean; - // Warning: (ae-forgotten-export) The symbol "RescoringOptions" needs to be exported by the entry point index.d.ts rescoringOptions?: RescoringOptions; truncationDimension?: number; } @@ -500,6 +499,38 @@ export interface DocumentExtractionSkill extends BaseSearchIndexerSkill { parsingMode?: string; } +// @public +export interface DocumentIntelligenceLayoutSkill extends BaseSearchIndexerSkill { + chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties; + extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[]; + markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth; + odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill"; + outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat; + outputMode?: DocumentIntelligenceLayoutSkillOutputMode; +} + +// @public +export interface DocumentIntelligenceLayoutSkillChunkingProperties { + maximumLength?: number; + overlapLength?: number; + unit?: DocumentIntelligenceLayoutSkillChunkingUnit; +} + +// @public +export type DocumentIntelligenceLayoutSkillChunkingUnit = string; + +// @public +export type DocumentIntelligenceLayoutSkillExtractionOptions = string; + +// @public +export type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string; + +// @public +export type DocumentIntelligenceLayoutSkillOutputFormat = string; + +// @public +export type DocumentIntelligenceLayoutSkillOutputMode = string; + // @public export interface EdgeNGramTokenFilter { maxGram?: number; @@ -1008,6 +1039,32 @@ export enum KnownCustomEntityLookupSkillLanguage { Pt = "pt" } +// @public +export enum KnownDocumentIntelligenceLayoutSkillChunkingUnit { + Characters = "characters" +} + +// @public +export enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth { + H1 = "h1", + H2 = "h2", + H3 = "h3", + H4 = "h4", + H5 = "h5", + H6 = "h6" +} + +// @public +export enum KnownDocumentIntelligenceLayoutSkillOutputFormat { + Markdown = "markdown", + Text = "text" +} + +// @public +export enum KnownDocumentIntelligenceLayoutSkillOutputMode { + OneToMany = "oneToMany" +} + // @public export enum KnownEntityCategory { Datetime = "datetime", @@ -1639,6 +1696,12 @@ export enum KnownVectorSearchCompressionKind { ScalarQuantization = "scalarQuantization" } +// @public +export enum KnownVectorSearchCompressionRescoreStorageMethod { + DiscardOriginals = "discardOriginals", + PreserveOriginals = "preserveOriginals" +} + // @public export enum KnownVectorSearchCompressionTarget { Int8 = "int8" @@ -1935,6 +1998,13 @@ export type RankingOrder = string; // @public (undocumented) export type RegexFlags = `${KnownRegexFlags}`; +// @public +export interface RescoringOptions { + defaultOversampling?: number; + enableRescoring?: boolean; + rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod; +} + // @public export type 
ResetIndexerOptions = OperationOptions; @@ -2269,7 +2339,7 @@ export interface SearchIndexerLimits { } // @public -export type SearchIndexerSkill = AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | WebApiSkill; +export type SearchIndexerSkill = AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | DocumentIntelligenceLayoutSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | WebApiSkill; // @public export interface SearchIndexerSkillset { @@ -2781,6 +2851,9 @@ export type VectorSearchCompression = BinaryQuantizationCompression | ScalarQuan // @public export type VectorSearchCompressionKind = string; +// @public +export type VectorSearchCompressionRescoreStorageMethod = string; + // @public export type VectorSearchCompressionTarget = string; diff --git a/sdk/search/search-documents/src/index.ts b/sdk/search/search-documents/src/index.ts index 9aaa43467501..4d06aeff6588 100644 --- a/sdk/search/search-documents/src/index.ts +++ b/sdk/search/search-documents/src/index.ts @@ -22,13 +22,13 @@ export { SearchMode, } from "./generated/data/models/index.js"; export { - AnalyzedTokenInfo, AnalyzeResult, + AnalyzedTokenInfo, AsciiFoldingTokenFilter, AzureActiveDirectoryApplicationCredentials, AzureOpenAIModelName, - BinaryQuantizationCompression, BM25Similarity, + BinaryQuantizationCompression, CharFilter as BaseCharFilter, CharFilterName, CjkBigramTokenFilter, @@ -50,6 +50,13 @@ export { DistanceScoringFunction, DistanceScoringParameters, DocumentExtractionSkill, + DocumentIntelligenceLayoutSkill, + DocumentIntelligenceLayoutSkillChunkingProperties, + DocumentIntelligenceLayoutSkillChunkingUnit, + DocumentIntelligenceLayoutSkillExtractionOptions, + DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + DocumentIntelligenceLayoutSkillOutputFormat, + DocumentIntelligenceLayoutSkillOutputMode, EdgeNGramTokenFilterSide, EdgeNGramTokenizer, ElisionTokenFilter, @@ -60,27 +67,31 @@ export { FreshnessScoringFunction, FreshnessScoringParameters, HighWaterMarkChangeDetectionPolicy, + IndexProjectionMode, IndexerExecutionResult, IndexerExecutionStatus, IndexerStatus, IndexingSchedule, - IndexProjectionMode, InputFieldMappingEntry, KeepTokenFilter, KeywordMarkerTokenFilter, KnownAzureOpenAIModelName, KnownBlobIndexerDataToExtract, KnownBlobIndexerImageAction, - KnownBlobIndexerParsingMode, KnownBlobIndexerPDFTextRotationAlgorithm, + KnownBlobIndexerParsingMode, KnownCharFilterName as KnownCharFilterNames, KnownCustomEntityLookupSkillLanguage, + KnownDocumentIntelligenceLayoutSkillChunkingUnit, + KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + KnownDocumentIntelligenceLayoutSkillOutputFormat, + KnownDocumentIntelligenceLayoutSkillOutputMode, KnownEntityCategory, KnownEntityRecognitionSkillLanguage, KnownImageAnalysisSkillLanguage, KnownImageDetail, - KnownIndexerExecutionEnvironment, KnownIndexProjectionMode, + KnownIndexerExecutionEnvironment, KnownKeyPhraseExtractionSkillLanguage, KnownLexicalNormalizerName as 
KnownLexicalNormalizerNames, KnownLexicalTokenizerName as KnownTokenizerNames, @@ -100,6 +111,7 @@ export { KnownVectorSearchAlgorithmKind, KnownVectorSearchAlgorithmMetric, KnownVectorSearchCompressionKind, + KnownVectorSearchCompressionRescoreStorageMethod, KnownVectorSearchCompressionTarget, KnownVectorSearchVectorizerKind, KnownVisualFeature, @@ -107,10 +119,10 @@ export { LengthTokenFilter, LexicalAnalyzer as BaseLexicalAnalyzer, LexicalAnalyzerName, + LexicalNormalizer as BaseLexicalNormalizer, + LexicalNormalizerName, LexicalTokenizer as BaseLexicalTokenizer, LexicalTokenizerName, - LexicalNormalizerName, - LexicalNormalizer as BaseLexicalNormalizer, LimitTokenFilter, LuceneStandardAnalyzer, MagnitudeScoringFunction, @@ -131,6 +143,7 @@ export { PhoneticEncoder, PhoneticTokenFilter, RankingOrder, + RescoringOptions, ResourceCounter, ScalarQuantizationCompression, ScalarQuantizationParameters, @@ -187,6 +200,7 @@ export { VectorEncodingFormat, VectorSearchCompression as BaseVectorSearchCompression, VectorSearchCompressionKind, + VectorSearchCompressionRescoreStorageMethod, VectorSearchCompressionTarget, VectorSearchProfile, VectorSearchVectorizerKind, diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index b62d9cfb3441..cd7b7682b42d 100644 --- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -6,8 +6,8 @@ import { PagedAsyncIterableIterator } from "@azure/core-paging"; import { AsciiFoldingTokenFilter, AzureOpenAIModelName, - BinaryQuantizationCompression, BM25Similarity, + BinaryQuantizationCompression, CharFilterName, CjkBigramTokenFilter, ClassicSimilarity, @@ -17,10 +17,12 @@ import { ConditionalSkill, CorsOptions, CustomEntity, + CustomNormalizer, DefaultCognitiveServicesAccount, DictionaryDecompounderTokenFilter, DistanceScoringFunction, DocumentExtractionSkill, + DocumentIntelligenceLayoutSkill, EdgeNGramTokenFilterSide, EdgeNGramTokenizer, ElisionTokenFilter, @@ -29,14 +31,14 @@ import { FieldMapping, FreshnessScoringFunction, HighWaterMarkChangeDetectionPolicy, - IndexingSchedule, IndexProjectionMode, + IndexingSchedule, KeepTokenFilter, KeywordMarkerTokenFilter, KnownBlobIndexerDataToExtract, KnownBlobIndexerImageAction, - KnownBlobIndexerParsingMode, KnownBlobIndexerPDFTextRotationAlgorithm, + KnownBlobIndexerParsingMode, KnownCharFilterName, KnownCustomEntityLookupSkillLanguage, KnownEntityCategory, @@ -64,6 +66,7 @@ import { LengthTokenFilter, LexicalAnalyzerName, LexicalNormalizer as BaseLexicalNormalizer, + LexicalNormalizerName, LexicalTokenizerName, LimitTokenFilter, LuceneStandardAnalyzer, @@ -111,8 +114,6 @@ import { VectorSearchProfile, VectorSearchVectorizerKind, WordDelimiterTokenFilter, - CustomNormalizer, - LexicalNormalizerName, } from "./generated/service/models/index.js"; /** @@ -602,6 +603,7 @@ export type SearchIndexerSkill = | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill + | DocumentIntelligenceLayoutSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 From d4a4d873b956074d6a5ecaf28c9d2e3912780132 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Fri, 3 Oct 2025 19:13:08 +0000 Subject: [PATCH 19/21] update comment per code review --- sdk/search/search-documents/src/serviceModels.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/search/search-documents/src/serviceModels.ts b/sdk/search/search-documents/src/serviceModels.ts index cd7b7682b42d..fd8767e9c72d 100644 
--- a/sdk/search/search-documents/src/serviceModels.ts +++ b/sdk/search/search-documents/src/serviceModels.ts @@ -898,8 +898,8 @@ export interface SimpleField { * contents in a search response to save on storage overhead. This can only be set during index * creation and only for vector fields. This property cannot be changed for existing fields or set * as false for new fields. If this property is set as false, the property 'hidden' must be set to - * 'true'. This property must be false or unset for key fields, for new fields, and for non-vector - * fields. Disabling this property will reduce index storage requirements. + * 'true'. This property must be true or unset for key fields, for new fields, and for non-vector + * fields. Disabling this property will reduce index storage requirements. The default is true for vector fields. */ stored?: boolean; /** From 51a8aa7cc0236b1e8308e88d0ec463cd195b27f9 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Fri, 3 Oct 2025 19:16:47 +0000 Subject: [PATCH 20/21] restore troubleshooting guide --- .../search-documents/src/TROUBLESHOOTING.md | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/sdk/search/search-documents/src/TROUBLESHOOTING.md b/sdk/search/search-documents/src/TROUBLESHOOTING.md index 679b79eaba26..efa5656e2bb0 100644 --- a/sdk/search/search-documents/src/TROUBLESHOOTING.md +++ b/sdk/search/search-documents/src/TROUBLESHOOTING.md @@ -1,7 +1,7 @@ # Troubleshooting Azure Cognitive Search SDK Issues The `azure-search-documents` package provides APIs for operations on the -[Azure Cognitive Search](https://docs.microsoft.com/azure/search/search-what-is-azure-search) cloud service. +[Azure Cognitive Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) cloud service. ## Table of Contents @@ -13,18 +13,18 @@ The `azure-search-documents` package provides APIs for operations on the ## Troubleshooting Issues By Response Code -See [this page](https://docs.microsoft.com/rest/api/searchservice/http-status-codes) for the common response status codes sent by the Azure Cognitive Search service. +See [this page](https://learn.microsoft.com/rest/api/searchservice/http-status-codes) for the common response status codes sent by the Azure Cognitive Search service. ### 207 Multi-Status -This response status indicates a partial success for an indexing operation. Some documents were successfully processed, but at least one failed. Details of the failed documents are present in the individual `IndexingResult` objects within the `IndexDocumentsResult`. If you want the [`indexDocuments`](https://docs.microsoft.com/javascript/api/@azure/search-documents/searchclient?view=azure-node-latest#@azure-search-documents-searchclient-indexdocuments) method call to throw an exception on any failure, set [`IndexDocumentsOptions.throwOnAnyError`](https://docs.microsoft.com/javascript/api/@azure/search-documents/indexdocumentsoptions?view=azure-node-latest#@azure-search-documents-indexdocumentsoptions-throwonanyfailure) +This response status indicates a partial success for an indexing operation. Some documents were successfully processed, but at least one failed. Details of the failed documents are present in the individual `IndexingResult` objects within the `IndexDocumentsResult`.
If you want the [`indexDocuments`](https://learn.microsoft.com/javascript/api/@azure/search-documents/searchclient?view=azure-node-latest#@azure-search-documents-searchclient-indexdocuments) method call to throw an exception on any failure, set [`IndexDocumentsOptions.throwOnAnyFailure`](https://learn.microsoft.com/javascript/api/@azure/search-documents/indexdocumentsoptions?view=azure-node-latest#@azure-search-documents-indexdocumentsoptions-throwonanyfailure) to `true`. Each failure is then recorded in a separate `IndexingResult` and a single `IndexBatchException` is thrown by the method. ### 403 Forbidden Returned when you pass an invalid api-key. Search service uses two types of keys to control access: admin (read-write) and query (read-only). The **admin key** grants full rights to all operations, including the ability to manage the service, create and delete indexes, indexers, and data sources. The **query key** grants read-only access to indexes and documents. -Ensure that the key used for an API call provides sufficient privileges for the operation. See [here](https://docs.microsoft.com/azure/search/search-security-api-keys) +Ensure that the key used for an API call provides sufficient privileges for the operation. See [here](https://learn.microsoft.com/azure/search/search-security-api-keys) for details about managing API keys. If you are using the `azure-identity` package to authenticate requests to Azure Cognitive Search, please see our [troubleshooting guide](https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/identity/identity/TROUBLESHOOTING.md). @@ -37,7 +37,7 @@ Returned when a resource does not exist on the server. If you are managing or qu If this error occurs while you are trying to create an index, it means you already have the maximum number of indexes allowed for your pricing tier. A count of the indexes stored in Azure Cognitive Search is visible in the search service dashboard on the [Azure portal](https://portal.azure.com/). To view the indexes by name, click the Index tile. -Alternatively, you can also get a list of the indexes by name using the [listIndexNames() method](https://docs.microsoft.com/javascript/api/@azure/search-documents/searchindexclient?view=azure-node-latest#@azure-search-documents-searchindexclient-listindexesnames). +Alternatively, you can also get a list of the indexes by name using the [listIndexNames() method](https://learn.microsoft.com/javascript/api/@azure/search-documents/searchindexclient?view=azure-node-latest#@azure-search-documents-searchindexclient-listindexesnames). If this error occurs during document upload, it indicates that you've exceeded your quota on the number of documents per index. You must either create a new index or upgrade for higher capacity limits. @@ -45,9 +45,7 @@ If this error occurs during document upload, it indicates that you've exceeded y A common class of issues when using the Search SDK is that the result set of a search query is different from what is expected. -For such cases, you should start by running the search query in the portal to rule out any service-side issues with the search query or any parameter(s). Review the [OData syntax](https://docs.microsoft.com/azure/search/query-odata-filter-orderby-syntax), if any, used in the query. +For such cases, you should start by running the search query in the portal to rule out any service-side issues with the search query or any parameter(s).
Review the [OData syntax](https://learn.microsoft.com/azure/search/query-odata-filter-orderby-syntax), if any, used in the query. -Once the result looks good in the portal, use that as the template to populate the objects and parameters in the search request APIs. You should also verify that the correct set of documents have been indexed and are being searched on the service side. One tip would be to start with a 'broad' query (one that returns a superset of desired results, possibly by giving a large value for or entirely removing, some [query parameters](https://docs.microsoft.com/rest/api/searchservice/search-documents#query-parameters)) +Once the result looks good in the portal, use that as the template to populate the objects and parameters in the search request APIs. You should also verify that the correct set of documents have been indexed and are being searched on the service side. One tip would be to start with a 'broad' query (one that returns a superset of desired results, possibly by giving a large value for or entirely removing, some [query parameters](https://learn.microsoft.com/rest/api/searchservice/search-documents#query-parameters)) and then progressively refining the query till it expresses the desired intent. - -![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-java%2Fsdk%2Fsearch%2Fazure-search-documents%2FTROUBLESHOOTING.png) From 8ac06167d8e6b81c68a6f3da6470f0e698118f18 Mon Sep 17 00:00:00 2001 From: Maor Leger Date: Fri, 3 Oct 2025 19:17:52 +0000 Subject: [PATCH 21/21] add comment on esm support --- sdk/search/search-documents/CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sdk/search/search-documents/CHANGELOG.md b/sdk/search/search-documents/CHANGELOG.md index 5951dc33db84..02ccfb7f3ce3 100644 --- a/sdk/search/search-documents/CHANGELOG.md +++ b/sdk/search/search-documents/CHANGELOG.md @@ -15,6 +15,10 @@ use it when analyzing text with `SearchIndexClient.analyzeText` and `SearchIndexAsyncClient.analyzeText`. - Support `DocumentIntelligenceLayoutSkill` skillset skill and `OneLake` `SearchIndexerDataSourceConnection` data source. +### Other Changes + +- Native ESM support has been added, and this package will now emit both CommonJS and ESM. + ## 12.2.0-beta.2 (2024-11-25) ### Features Added
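The patches above add `DocumentIntelligenceLayoutSkill` to the public surface. As a minimal sketch of how the new skill composes, assuming placeholder name, context, and input/output mappings (none of these values come from the patch itself):

```ts
import {
  KnownDocumentIntelligenceLayoutSkillChunkingUnit,
  KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth,
  KnownDocumentIntelligenceLayoutSkillOutputFormat,
  KnownDocumentIntelligenceLayoutSkillOutputMode,
  type DocumentIntelligenceLayoutSkill,
} from "@azure/search-documents";

// Hypothetical layout skill emitting markdown chunks; the name, context,
// and field mappings are placeholders, not values taken from the patch.
const layoutSkill: DocumentIntelligenceLayoutSkill = {
  odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill",
  name: "document-layout",
  context: "/document",
  outputFormat: KnownDocumentIntelligenceLayoutSkillOutputFormat.Markdown,
  outputMode: KnownDocumentIntelligenceLayoutSkillOutputMode.OneToMany,
  markdownHeaderDepth: KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth.H3,
  chunkingProperties: {
    unit: KnownDocumentIntelligenceLayoutSkillChunkingUnit.Characters,
    maximumLength: 2000,
    overlapLength: 200,
  },
  inputs: [{ name: "file_data", source: "/document/file_data" }],
  outputs: [{ name: "markdown_document", targetName: "markdownDocument" }],
};
```

Because the skill now participates in the `SearchIndexerSkill` union, it can be placed in a `SearchIndexerSkillset` like any other skill.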
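`RescoringOptions` and `VectorSearchCompressionRescoreStorageMethod`, also newly exported, are consumed through the vector search compression configuration. A sketch under the assumption of an otherwise-default scalar quantization setup; the compression name is a placeholder:

```ts
import {
  KnownVectorSearchCompressionRescoreStorageMethod,
  type ScalarQuantizationCompression,
} from "@azure/search-documents";

// Placeholder compression configuration showing the new rescoringOptions
// bag; it attaches to a vector search profile the same way as before.
const compression: ScalarQuantizationCompression = {
  compressionName: "sq-with-rescoring",
  kind: "scalarQuantization",
  rescoringOptions: {
    enableRescoring: true,
    defaultOversampling: 4,
    rescoreStorageMethod:
      KnownVectorSearchCompressionRescoreStorageMethod.PreserveOriginals,
  },
};
```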
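On the earlier `SimpleField.normalizerName` change from `string` to `LexicalNormalizerName`: the alias is an extensible string type, so existing values keep compiling while the declaration now points at the service-defined normalizer names. A minimal field sketch with an illustrative name:

```ts
import type { SimpleField } from "@azure/search-documents";

// Hypothetical string field; "hotelName" is an illustrative name. With the
// patch, normalizerName is typed as LexicalNormalizerName, whose known
// values include "lowercase", "uppercase", "asciifolding", and "elision".
const hotelNameField: SimpleField = {
  name: "hotelName",
  type: "Edm.String",
  filterable: true,
  sortable: true,
  normalizerName: "lowercase",
};
```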
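Finally, for the restored 207 Multi-Status guidance: a minimal sketch of opting into thrown errors on partial indexing failures. The endpoint, index name, key, and document shape are all placeholders:

```ts
import { AzureKeyCredential, SearchClient } from "@azure/search-documents";

interface Hotel {
  id: string;
}

// Placeholder endpoint, index name, and admin key.
const client = new SearchClient<Hotel>(
  "https://<service-name>.search.windows.net",
  "hotels",
  new AzureKeyCredential("<admin-key>"),
);

async function uploadStrict(docs: Hotel[]): Promise<void> {
  // With throwOnAnyFailure set, a 207 partial success surfaces as a thrown
  // error instead of requiring inspection of each IndexingResult.
  await client.uploadDocuments(docs, { throwOnAnyFailure: true });
}
```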