Commit d68a636

Fix PR comments
1 parent 7e6d696 commit d68a636

7 files changed: 12 additions & 14 deletions

x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openshiftai/OpenShiftAiModel.java

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 import java.util.Objects;

 /**
- * Represents an OpenShift AI modelId that can be used for inference tasks.
+ * Represents an OpenShift AI model that can be used for inference tasks.
  * This class extends RateLimitGroupingModel to handle rate limiting based on modelId and API key.
  */
 public abstract class OpenShiftAiModel extends RateLimitGroupingModel {
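
The class-level javadoc above says rate limiting is grouped by modelId and API key. As a rough sketch of that grouping idea (hypothetical names only; this is not the actual RateLimitGroupingModel API), the grouping key can be a hash of the two fields, so endpoints that share a model and credentials are throttled together:

// Hypothetical sketch: group rate-limit state by (modelId, apiKey).
// Class and method names are made up for illustration.
import java.util.Objects;

abstract class RateLimitGroupedModelSketch {
    private final String modelId;
    private final String apiKey;

    RateLimitGroupedModelSketch(String modelId, String apiKey) {
        this.modelId = modelId;
        this.apiKey = apiKey;
    }

    // Models with the same modelId and API key land in the same group,
    // so their requests count against one shared rate-limit bucket.
    int rateLimitGroupingHash() {
        return Objects.hash(modelId, apiKey);
    }
}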

x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openshiftai/OpenShiftAiService.java

Lines changed: 5 additions & 7 deletions
@@ -57,7 +57,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 import java.util.Set;

 import static org.elasticsearch.xpack.inference.services.ServiceFields.MODEL_ID;
@@ -69,16 +68,15 @@
 import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap;

 /**
- * OpenShiftAiService is an implementation of the SenderService that handles inference tasks
+ * OpenShiftAiService is an implementation of the {@link SenderService} and {@link RerankingInferenceService} that handles inference tasks
  * using models deployed to OpenShift AI environment.
- * The service uses OpenShiftAiActionCreator to create actions for executing inference requests.
+ * The service uses {@link OpenShiftAiActionCreator} to create actions for executing inference requests.
  */
 public class OpenShiftAiService extends SenderService implements RerankingInferenceService {
     public static final String NAME = "openshift_ai";
     /**
-     * The optimal batch size depends on the hardware the model is deployed on.
-     * For OpenShift AI use a conservatively small max batch size as it is
-     * unknown how the model is deployed
+     * The optimal batch size depends on the model deployed in OpenShift AI.
+     * For OpenShift AI use a conservatively small max batch size as it is unknown what model is deployed.
      */
     static final int EMBEDDING_MAX_BATCH_SIZE = 20;
     private static final String SERVICE_NAME = "OpenShift AI";
@@ -115,7 +113,7 @@ protected void doInfer(
     ) {
         var actionCreator = new OpenShiftAiActionCreator(getSender(), getServiceComponents());

-        switch (Objects.requireNonNull(model)) {
+        switch (model) {
             case OpenShiftAiChatCompletionModel chatCompletionModel -> chatCompletionModel.accept(actionCreator)
                 .execute(inputs, timeout, listener);
             case OpenShiftAiEmbeddingsModel embeddingsModel -> embeddingsModel.accept(actionCreator).execute(inputs, timeout, listener);
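
The one non-javadoc change in this file is dropping Objects.requireNonNull(model) from the switch (and the now-unused java.util.Objects import). This is safe because a pattern-matching switch with no case null label already throws a NullPointerException when its selector is null, so the explicit check was redundant. A minimal, self-contained sketch of that behavior on Java 21+ (the sealed hierarchy below is a stand-in, not the real inference model classes):

// Stand-in model hierarchy; only the switch semantics matter here.
sealed interface Model permits ChatModel, EmbeddingsModel {}
record ChatModel() implements Model {}
record EmbeddingsModel() implements Model {}

public class PatternSwitchNullDemo {
    static String dispatch(Model model) {
        // No explicit case null label, so a null selector throws NullPointerException,
        // which makes an extra Objects.requireNonNull(model) unnecessary.
        return switch (model) {
            case ChatModel chat -> "chat";
            case EmbeddingsModel embeddings -> "embeddings";
        };
    }

    public static void main(String[] args) {
        System.out.println(dispatch(new ChatModel()));   // prints "chat"
        try {
            dispatch(null);
        } catch (NullPointerException expected) {
            System.out.println("null selector -> NullPointerException");
        }
    }
}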

x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openshiftai/completion/OpenShiftAiChatCompletionModel.java

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@
 public class OpenShiftAiChatCompletionModel extends OpenShiftAiModel {

     /**
-     * Constructor for creating a OpenShiftAiChatCompletionModel with specified parameters.
+     * Constructor for creating an OpenShiftAiChatCompletionModel with specified parameters.
      * @param inferenceEntityId the unique identifier for the inference entity
      * @param taskType the type of task this model is designed for
      * @param service the name of the inference service

x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openshiftai/completion/OpenShiftAiChatCompletionServiceSettings.java

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@
 import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractUri;

 /**
- * Represents the settings for a OpenShift AI chat completion service.
+ * Represents the settings for an OpenShift AI chat completion service.
  * This class encapsulates the model ID, URI, and rate limit settings for the OpenShift AI chat completion service.
  */
 public class OpenShiftAiChatCompletionServiceSettings extends OpenShiftAiServiceSettings {

x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openshiftai/completion/OpenShiftAiCompletionResponseHandler.java

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@
 public class OpenShiftAiCompletionResponseHandler extends OpenAiChatCompletionResponseHandler {

     /**
-     * Constructs a OpenShiftAiCompletionResponseHandler with the specified request type and response parser.
+     * Constructs an OpenShiftAiCompletionResponseHandler with the specified request type and response parser.
      *
      * @param requestType The type of request being handled (e.g., "Openshift AI completions").
      * @param parseFunction The function to parse the response.

x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openshiftai/embeddings/OpenShiftAiEmbeddingsModel.java

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@
 import java.util.Map;

 /**
- * Represents a OpenShift AI embeddings model for inference.
+ * Represents an OpenShift AI embeddings model for inference.
  * This class extends the OpenShiftAiModel and provides specific configurations and settings for embeddings tasks.
  */
 public class OpenShiftAiEmbeddingsModel extends OpenShiftAiModel {
@@ -51,7 +51,7 @@ public OpenShiftAiEmbeddingsModel(OpenShiftAiEmbeddingsModel model, OpenShiftAiE
     }

     /**
-     * Constructor for creating a OpenShiftAiEmbeddingsModel with specified parameters.
+     * Constructor for creating an OpenShiftAiEmbeddingsModel with specified parameters.
      *
      * @param inferenceEntityId the unique identifier for the inference entity
      * @param taskType the type of task this model is designed for
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openshiftai/rerank/OpenShiftAiRerankServiceSettings.java

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@
 import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractUri;

 /**
- * Represents the settings for a OpenShift AI chat rerank service.
+ * Represents the settings for an OpenShift AI chat rerank service.
  * This class encapsulates the model ID, URI, and rate limit settings for the OpenShift AI chat rerank service.
  */
 public class OpenShiftAiRerankServiceSettings extends OpenShiftAiServiceSettings {
