@@ -202,6 +202,61 @@ public final class GoogleCloudDiscoveryengineV1SearchRequest extends com.google.
202202 @ com .google .api .client .util .Key
203203 private GoogleCloudDiscoveryengineV1SearchRequestQueryExpansionSpec queryExpansionSpec ;
204204
  /**
   * Optional. The ranking expression controls the customized ranking on retrieval documents. This
   * overrides ServingConfig.ranking_expression. The syntax and supported features depend on the
   * `ranking_expression_backend` value. If `ranking_expression_backend` is not provided, it
   * defaults to `RANK_BY_EMBEDDING`. If ranking_expression_backend is not provided or set to
   * `RANK_BY_EMBEDDING`, it should be a single function or multiple functions that are joined by
   * "+". * ranking_expression = function, { " + ", function }; Supported functions: * double *
   * relevance_score * double * dotProduct(embedding_field_path) Function variables: *
   * `relevance_score`: pre-defined keywords, used to measure relevance between query and document.
   * * `embedding_field_path`: the document embedding field used with query embedding vector. *
   * `dotProduct`: embedding function between `embedding_field_path` and query embedding vector.
   * Example ranking expression: If document has an embedding field doc_embedding, the ranking
   * expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`. If
   * ranking_expression_backend is set to `RANK_BY_FORMULA`, the following expression types (and
   * combinations of those chained using + or * operators) are supported: * `double` * `signal` *
   * `log(signal)` * `exp(signal)` * `rr(signal, double > 0)` -- reciprocal rank transformation with
   * second argument being a denominator constant. * `is_nan(signal)` -- returns 0 if signal is NaN,
   * 1 otherwise. * `fill_nan(signal1, signal2 | double)` -- if signal1 is NaN, returns signal2 |
   * double, else returns signal1. Here are a few examples of ranking formulas that use the
   * supported ranking expression types: - `0.2 * semantic_similarity_score + 0.8 *
   * log(keyword_similarity_score)` -- mostly rank by the logarithm of `keyword_similarity_score`
   * with slight `semantic_similarity_score` adjustment. - `0.2 *
   * exp(fill_nan(semantic_similarity_score, 0)) + 0.3 * is_nan(keyword_similarity_score)` -- rank
   * by the exponent of `semantic_similarity_score` filling the value with 0 if it's NaN, also add
   * constant 0.3 adjustment to the final score if `semantic_similarity_score` is NaN. - `0.2 *
   * rr(semantic_similarity_score, 16) + 0.8 * rr(keyword_similarity_score, 16)` -- mostly rank by
   * the reciprocal rank of `keyword_similarity_score` with slight adjustment of reciprocal rank of
   * `semantic_similarity_score`. The following signals are supported: * `semantic_similarity_score`:
   * semantic similarity adjustment that is calculated using the embeddings generated by a
   * proprietary Google model. This score determines how semantically similar a search query is to a
   * document. * `keyword_similarity_score`: keyword match adjustment uses the Best Match 25 (BM25)
   * ranking function. This score is calculated using a probabilistic model to estimate the
   * probability that a document is relevant to a given query. * `relevance_score`: semantic
   * relevance adjustment that uses a proprietary Google model to determine the meaning and intent
   * behind a user's query in context with the content in the documents. * `pctr_rank`: predicted
   * conversion rate adjustment as a rank use predicted Click-through rate (pCTR) to gauge the
   * relevance and attractiveness of a search result from a user's perspective. A higher pCTR
   * suggests that the result is more likely to satisfy the user's query and intent, making it a
   * valuable signal for ranking. * `freshness_rank`: freshness adjustment as a rank *
   * `document_age`: The time in hours elapsed since the document was last updated, a floating-point
   * number (e.g., 0.25 means 15 minutes). * `topicality_rank`: topicality adjustment as a rank.
   * Uses proprietary Google model to determine the keyword-based overlap between the query and the
   * document. * `base_rank`: the default rank of the result
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String rankingExpression;
252+
  /**
   * Optional. The backend to use for the ranking expression evaluation.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String rankingExpressionBackend;
259+
205260 /**
206261 * Optional. The specification for returning the relevance score.
207262 * The value may be {@code null}.
@@ -701,6 +756,122 @@ public GoogleCloudDiscoveryengineV1SearchRequest setQueryExpansionSpec(GoogleClo
701756 return this ;
702757 }
703758
759+ /**
760+ * Optional. The ranking expression controls the customized ranking on retrieval documents. This
761+ * overrides ServingConfig.ranking_expression. The syntax and supported features depend on the
762+ * `ranking_expression_backend` value. If `ranking_expression_backend` is not provided, it
763+ * defaults to `RANK_BY_EMBEDDING`. If ranking_expression_backend is not provided or set to
764+ * `RANK_BY_EMBEDDING`, it should be a single function or multiple functions that are joined by
765+ * "+". * ranking_expression = function, { " + ", function }; Supported functions: * double *
766+ * relevance_score * double * dotProduct(embedding_field_path) Function variables: *
767+ * `relevance_score`: pre-defined keywords, used for measure relevance between query and document.
768+ * * `embedding_field_path`: the document embedding field used with query embedding vector. *
769+ * `dotProduct`: embedding function between `embedding_field_path` and query embedding vector.
770+ * Example ranking expression: If document has an embedding field doc_embedding, the ranking
771+ * expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`. If
772+ * ranking_expression_backend is set to `RANK_BY_FORMULA`, the following expression types (and
773+ * combinations of those chained using + or * operators) are supported: * `double` * `signal` *
774+ * `log(signal)` * `exp(signal)` * `rr(signal, double > 0)` -- reciprocal rank transformation with
775+ * second argument being a denominator constant. * `is_nan(signal)` -- returns 0 if signal is NaN,
776+ * 1 otherwise. * `fill_nan(signal1, signal2 | double)` -- if signal1 is NaN, returns signal2 |
777+ * double, else returns signal1. Here are a few examples of ranking formulas that use the
778+ * supported ranking expression types: - `0.2 * semantic_similarity_score + 0.8 *
779+ * log(keyword_similarity_score)` -- mostly rank by the logarithm of `keyword_similarity_score`
780+ * with slight `semantic_smilarity_score` adjustment. - `0.2 *
781+ * exp(fill_nan(semantic_similarity_score, 0)) + 0.3 * is_nan(keyword_similarity_score)` -- rank
782+ * by the exponent of `semantic_similarity_score` filling the value with 0 if it's NaN, also add
783+ * constant 0.3 adjustment to the final score if `semantic_similarity_score` is NaN. - `0.2 *
784+ * rr(semantic_similarity_score, 16) + 0.8 * rr(keyword_similarity_score, 16)` -- mostly rank by
785+ * the reciprocal rank of `keyword_similarity_score` with slight adjustment of reciprocal rank of
786+ * `semantic_smilarity_score`. The following signals are supported: * `semantic_similarity_score`:
787+ * semantic similarity adjustment that is calculated using the embeddings generated by a
788+ * proprietary Google model. This score determines how semantically similar a search query is to a
789+ * document. * `keyword_similarity_score`: keyword match adjustment uses the Best Match 25 (BM25)
790+ * ranking function. This score is calculated using a probabilistic model to estimate the
791+ * probability that a document is relevant to a given query. * `relevance_score`: semantic
792+ * relevance adjustment that uses a proprietary Google model to determine the meaning and intent
793+ * behind a user's query in context with the content in the documents. * `pctr_rank`: predicted
794+ * conversion rate adjustment as a rank use predicted Click-through rate (pCTR) to gauge the
795+ * relevance and attractiveness of a search result from a user's perspective. A higher pCTR
796+ * suggests that the result is more likely to satisfy the user's query and intent, making it a
797+ * valuable signal for ranking. * `freshness_rank`: freshness adjustment as a rank *
798+ * `document_age`: The time in hours elapsed since the document was last updated, a floating-point
799+ * number (e.g., 0.25 means 15 minutes). * `topicality_rank`: topicality adjustment as a rank.
800+ * Uses proprietary Google model to determine the keyword-based overlap between the query and the
801+ * document. * `base_rank`: the default rank of the result
802+ * @return value or {@code null} for none
803+ */
804+ public java .lang .String getRankingExpression () {
805+ return rankingExpression ;
806+ }
807+
808+ /**
809+ * Optional. The ranking expression controls the customized ranking on retrieval documents. This
810+ * overrides ServingConfig.ranking_expression. The syntax and supported features depend on the
811+ * `ranking_expression_backend` value. If `ranking_expression_backend` is not provided, it
812+ * defaults to `RANK_BY_EMBEDDING`. If ranking_expression_backend is not provided or set to
813+ * `RANK_BY_EMBEDDING`, it should be a single function or multiple functions that are joined by
814+ * "+". * ranking_expression = function, { " + ", function }; Supported functions: * double *
815+ * relevance_score * double * dotProduct(embedding_field_path) Function variables: *
816+ * `relevance_score`: pre-defined keywords, used for measure relevance between query and document.
817+ * * `embedding_field_path`: the document embedding field used with query embedding vector. *
818+ * `dotProduct`: embedding function between `embedding_field_path` and query embedding vector.
819+ * Example ranking expression: If document has an embedding field doc_embedding, the ranking
820+ * expression could be `0.5 * relevance_score + 0.3 * dotProduct(doc_embedding)`. If
821+ * ranking_expression_backend is set to `RANK_BY_FORMULA`, the following expression types (and
822+ * combinations of those chained using + or * operators) are supported: * `double` * `signal` *
823+ * `log(signal)` * `exp(signal)` * `rr(signal, double > 0)` -- reciprocal rank transformation with
824+ * second argument being a denominator constant. * `is_nan(signal)` -- returns 0 if signal is NaN,
825+ * 1 otherwise. * `fill_nan(signal1, signal2 | double)` -- if signal1 is NaN, returns signal2 |
826+ * double, else returns signal1. Here are a few examples of ranking formulas that use the
827+ * supported ranking expression types: - `0.2 * semantic_similarity_score + 0.8 *
828+ * log(keyword_similarity_score)` -- mostly rank by the logarithm of `keyword_similarity_score`
829+ * with slight `semantic_smilarity_score` adjustment. - `0.2 *
830+ * exp(fill_nan(semantic_similarity_score, 0)) + 0.3 * is_nan(keyword_similarity_score)` -- rank
831+ * by the exponent of `semantic_similarity_score` filling the value with 0 if it's NaN, also add
832+ * constant 0.3 adjustment to the final score if `semantic_similarity_score` is NaN. - `0.2 *
833+ * rr(semantic_similarity_score, 16) + 0.8 * rr(keyword_similarity_score, 16)` -- mostly rank by
834+ * the reciprocal rank of `keyword_similarity_score` with slight adjustment of reciprocal rank of
835+ * `semantic_smilarity_score`. The following signals are supported: * `semantic_similarity_score`:
836+ * semantic similarity adjustment that is calculated using the embeddings generated by a
837+ * proprietary Google model. This score determines how semantically similar a search query is to a
838+ * document. * `keyword_similarity_score`: keyword match adjustment uses the Best Match 25 (BM25)
839+ * ranking function. This score is calculated using a probabilistic model to estimate the
840+ * probability that a document is relevant to a given query. * `relevance_score`: semantic
841+ * relevance adjustment that uses a proprietary Google model to determine the meaning and intent
842+ * behind a user's query in context with the content in the documents. * `pctr_rank`: predicted
843+ * conversion rate adjustment as a rank use predicted Click-through rate (pCTR) to gauge the
844+ * relevance and attractiveness of a search result from a user's perspective. A higher pCTR
845+ * suggests that the result is more likely to satisfy the user's query and intent, making it a
846+ * valuable signal for ranking. * `freshness_rank`: freshness adjustment as a rank *
847+ * `document_age`: The time in hours elapsed since the document was last updated, a floating-point
848+ * number (e.g., 0.25 means 15 minutes). * `topicality_rank`: topicality adjustment as a rank.
849+ * Uses proprietary Google model to determine the keyword-based overlap between the query and the
850+ * document. * `base_rank`: the default rank of the result
851+ * @param rankingExpression rankingExpression or {@code null} for none
852+ */
853+ public GoogleCloudDiscoveryengineV1SearchRequest setRankingExpression (java .lang .String rankingExpression ) {
854+ this .rankingExpression = rankingExpression ;
855+ return this ;
856+ }
857+
858+ /**
859+ * Optional. The backend to use for the ranking expression evaluation.
860+ * @return value or {@code null} for none
861+ */
862+ public java .lang .String getRankingExpressionBackend () {
863+ return rankingExpressionBackend ;
864+ }
865+
866+ /**
867+ * Optional. The backend to use for the ranking expression evaluation.
868+ * @param rankingExpressionBackend rankingExpressionBackend or {@code null} for none
869+ */
870+ public GoogleCloudDiscoveryengineV1SearchRequest setRankingExpressionBackend (java .lang .String rankingExpressionBackend ) {
871+ this .rankingExpressionBackend = rankingExpressionBackend ;
872+ return this ;
873+ }
874+
704875 /**
705876 * Optional. The specification for returning the relevance score.
706877 * @return value or {@code null} for none
0 commit comments