
Commit 8bde339

Releasing version 3.54.5

2 parents 9c8603e + 35822dd
File tree

8 files changed: +99 −12 lines

CHANGELOG.rst

Lines changed: 18 additions & 0 deletions

@@ -6,6 +6,24 @@ All notable changes to this project will be documented in this file.
 The format is based on `Keep a Changelog <http://keepachangelog.com/>`__.
 
+3.54.5 - 2025-04-29
+--------------------
+Added
+~~~~~
+* Resource Scheduler Service
+
+  * Support for updating compartment of a schedule
+
+    * ``oci resource-scheduler schedule change-compartment``
+
+  * Support for listing schedules by resource ID
+
+    * ``oci resource-scheduler schedule list --resource-id``
+
+* Support for new optional parameter chat-request-stream-options for cohere Chat in Generative Ai Inference Service
+
+  * ``oci generative-ai-inference chat-result chat-cohere-chat-request --chat-request-stream-options``
+
 3.54.4 - 2025-04-22
 --------------------
 Added
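The two new Resource Scheduler verbs follow the standard OCI CLI shapes for change-compartment and list operations. A minimal usage sketch (all OCIDs are placeholders, and ``--schedule-id`` is an assumption based on the usual change-compartment parameter naming; run each command with ``-h`` to confirm its exact parameters):

    # Move a schedule into a different compartment
    # (--schedule-id assumed per the standard change-compartment pattern)
    oci resource-scheduler schedule change-compartment \
        --schedule-id ocid1.resourceschedule.oc1..exampleuniqueID \
        --compartment-id ocid1.compartment.oc1..exampleuniqueID

    # List only the schedules that act on a given resource
    oci resource-scheduler schedule list \
        --compartment-id ocid1.compartment.oc1..exampleuniqueID \
        --resource-id ocid1.instance.oc1..exampleuniqueID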

requirements.txt

Lines changed: 1 addition & 1 deletion

@@ -15,7 +15,7 @@ Jinja2>=3.1.5; python_version >= '3.7'
 jmespath==0.10.0
 ndg-httpsclient==0.4.2
 mock==2.0.0
-oci==2.150.2
+oci==2.150.3
 packaging==20.2
 pluggy==0.13.0
 py==1.11.0

services/generative_ai/src/oci_cli_generative_ai/generated/generativeai_cli.py

Lines changed: 1 addition & 1 deletion

@@ -834,7 +834,7 @@ def list_endpoints(ctx, from_json, all_pages, page_size, compartment_id, lifecyc
 @model_collection_group.command(name=cli_util.override('generative_ai.list_models.command_name', 'list-models'), help=u"""Lists the models in a specific compartment. Includes pretrained base models and fine-tuned custom models. \n[Command Reference](listModels)""")
 @cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment in which to list resources.""")
 @cli_util.option('--vendor', help=u"""A filter to return only resources that match the entire vendor given.""")
-@cli_util.option('--capability', type=custom_types.CliCaseInsensitiveChoice(["TEXT_GENERATION", "TEXT_SUMMARIZATION", "TEXT_EMBEDDINGS", "FINE_TUNE", "CHAT"]), multiple=True, help=u"""A filter to return only resources their capability matches the given capability.""")
+@cli_util.option('--capability', type=custom_types.CliCaseInsensitiveChoice(["TEXT_GENERATION", "TEXT_SUMMARIZATION", "TEXT_EMBEDDINGS", "FINE_TUNE", "CHAT", "TEXT_RERANK"]), multiple=True, help=u"""A filter to return only resources their capability matches the given capability.""")
 @cli_util.option('--lifecycle-state', type=custom_types.CliCaseInsensitiveChoice(["ACTIVE", "CREATING", "DELETING", "DELETED", "FAILED"]), help=u"""A filter to return only resources their lifecycleState matches the given lifecycleState.""")
 @cli_util.option('--display-name', help=u"""A filter to return only resources that match the given display name exactly.""")
 @cli_util.option('--id', help=u"""The ID of the model.""")
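The only change here is the new TEXT_RERANK value in the ``--capability`` choice list, so rerank-capable models can now be filtered on list-models. A usage sketch, assuming the generated ``model_collection_group`` maps to the usual kebab-case command path (the compartment OCID is a placeholder):

    # Filter the model list down to rerank-capable models
    oci generative-ai model-collection list-models \
        --compartment-id ocid1.compartment.oc1..exampleuniqueID \
        --capability TEXT_RERANK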

services/generative_ai_inference/src/oci_cli_generative_ai_inference/generated/generativeaiinference_cli.py

Lines changed: 8 additions & 4 deletions

@@ -346,6 +346,7 @@ def chat_generic_chat_request(ctx, from_json, compartment_id, serving_mode, chat
 
 Example: `You are a travel advisor. Answer with a pirate tone.`""")
 @cli_util.option('--chat-request-is-stream', type=click.BOOL, help=u"""Whether to stream the partial progress of the model's response. When set to true, as tokens become available, they are sent as data-only server-sent events.""")
+@cli_util.option('--chat-request-stream-options', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
 @cli_util.option('--chat-request-max-tokens', type=click.INT, help=u"""The maximum number of output tokens that the model will generate for the response.""")
 @cli_util.option('--chat-request-max-input-tokens', type=click.INT, help=u"""The maximum number of input tokens to send to the model. If not specified, max_input_tokens is the model's context length limit minus a small buffer.""")
 @cli_util.option('--chat-request-temperature', help=u"""A number that sets the randomness of the generated output. A lower temperature means less random generations. Use lower numbers for tasks such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.""")
@@ -372,13 +373,13 @@ def chat_generic_chat_request(ctx, from_json, compartment_id, serving_mode, chat
 @cli_util.option('--chat-request-stop-sequences', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Stop the model generation when it reaches a stop sequence defined in this parameter.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
 @cli_util.option('--chat-request-is-raw-prompting', type=click.BOOL, help=u"""When enabled, the user\u2019s `message` will be sent to the model without any preprocessing.""")
 @cli_util.option('--chat-request-citation-quality', type=custom_types.CliCaseInsensitiveChoice(["ACCURATE", "FAST"]), help=u"""When FAST is selected, citations are generated at the same time as the text output and the request will be completed sooner. May result in less accurate citations.""")
-@cli_util.option('--chat-request-safety-mode', type=custom_types.CliCaseInsensitiveChoice(["CONTEXTUAL", "STRICT", "OFF"]), help=u"""Used to select the safety instruction inserted into the prompt. When selected CONTEXTUAL mode, It is appropriate for wide-ranging interactions with fewer constraints on output while maintaining core protections by rejecting harmful or illegal suggestions. When selected STRICT mode, it aims to avoid all sensitive topics, such as violent or sexual acts and profanity. When selected OFF, the safety instruction will be omitted. Note: This parameter is only compatible with models Command R 08-2024, Command R+ 08-2024 and newer. Also, command-r7b-12-2024 only supports \"CONTEXTUAL\" and \"STRICT\" modes.""")
-@json_skeleton_utils.get_cli_json_input_option({'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'chat-request-chat-history': {'module': 'generative_ai_inference', 'class': 'list[CohereMessage]'}, 'chat-request-documents': {'module': 'generative_ai_inference', 'class': 'list[object]'}, 'chat-request-response-format': {'module': 'generative_ai_inference', 'class': 'CohereResponseFormat'}, 'chat-request-tools': {'module': 'generative_ai_inference', 'class': 'list[CohereTool]'}, 'chat-request-tool-results': {'module': 'generative_ai_inference', 'class': 'list[CohereToolResult]'}, 'chat-request-stop-sequences': {'module': 'generative_ai_inference', 'class': 'list[string]'}})
+@cli_util.option('--chat-request-safety-mode', type=custom_types.CliCaseInsensitiveChoice(["CONTEXTUAL", "STRICT", "OFF"]), help=u"""Safety mode: Adds a safety instruction for the model to use when generating responses. Contextual: (Default) Puts fewer constraints on the output. It maintains core protections by aiming to reject harmful or illegal suggestions, but it allows profanity and some toxic content, sexually explicit and violent content, and content that contains medical, financial, or legal information. Contextual mode is suited for entertainment, creative, or academic use. Strict: Aims to avoid sensitive topics, such as violent or sexual acts and profanity. This mode aims to provide a safer experience by prohibiting responses or recommendations that it finds inappropriate. Strict mode is suited for corporate use, such as for corporate communications and customer service. Off: No safety mode is applied. Note: This parameter is only compatible with models cohere.command-r-08-2024, cohere.command-r-plus-08-2024 and Cohere models released after these models. See [release dates].""")
+@json_skeleton_utils.get_cli_json_input_option({'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'chat-request-chat-history': {'module': 'generative_ai_inference', 'class': 'list[CohereMessage]'}, 'chat-request-documents': {'module': 'generative_ai_inference', 'class': 'list[object]'}, 'chat-request-response-format': {'module': 'generative_ai_inference', 'class': 'CohereResponseFormat'}, 'chat-request-stream-options': {'module': 'generative_ai_inference', 'class': 'StreamOptions'}, 'chat-request-tools': {'module': 'generative_ai_inference', 'class': 'list[CohereTool]'}, 'chat-request-tool-results': {'module': 'generative_ai_inference', 'class': 'list[CohereToolResult]'}, 'chat-request-stop-sequences': {'module': 'generative_ai_inference', 'class': 'list[string]'}})
 @cli_util.help_option
 @click.pass_context
-@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'chat-request-chat-history': {'module': 'generative_ai_inference', 'class': 'list[CohereMessage]'}, 'chat-request-documents': {'module': 'generative_ai_inference', 'class': 'list[object]'}, 'chat-request-response-format': {'module': 'generative_ai_inference', 'class': 'CohereResponseFormat'}, 'chat-request-tools': {'module': 'generative_ai_inference', 'class': 'list[CohereTool]'}, 'chat-request-tool-results': {'module': 'generative_ai_inference', 'class': 'list[CohereToolResult]'}, 'chat-request-stop-sequences': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'ChatResult'})
+@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'chat-request-chat-history': {'module': 'generative_ai_inference', 'class': 'list[CohereMessage]'}, 'chat-request-documents': {'module': 'generative_ai_inference', 'class': 'list[object]'}, 'chat-request-response-format': {'module': 'generative_ai_inference', 'class': 'CohereResponseFormat'}, 'chat-request-stream-options': {'module': 'generative_ai_inference', 'class': 'StreamOptions'}, 'chat-request-tools': {'module': 'generative_ai_inference', 'class': 'list[CohereTool]'}, 'chat-request-tool-results': {'module': 'generative_ai_inference', 'class': 'list[CohereToolResult]'}, 'chat-request-stop-sequences': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'ChatResult'})
 @cli_util.wrap_exceptions
-def chat_cohere_chat_request(ctx, from_json, compartment_id, serving_mode, chat_request_message, chat_request_chat_history, chat_request_documents, chat_request_response_format, chat_request_is_search_queries_only, chat_request_preamble_override, chat_request_is_stream, chat_request_max_tokens, chat_request_max_input_tokens, chat_request_temperature, chat_request_top_k, chat_request_top_p, chat_request_prompt_truncation, chat_request_frequency_penalty, chat_request_presence_penalty, chat_request_seed, chat_request_is_echo, chat_request_tools, chat_request_tool_results, chat_request_is_force_single_step, chat_request_stop_sequences, chat_request_is_raw_prompting, chat_request_citation_quality, chat_request_safety_mode):
+def chat_cohere_chat_request(ctx, from_json, compartment_id, serving_mode, chat_request_message, chat_request_chat_history, chat_request_documents, chat_request_response_format, chat_request_is_search_queries_only, chat_request_preamble_override, chat_request_is_stream, chat_request_stream_options, chat_request_max_tokens, chat_request_max_input_tokens, chat_request_temperature, chat_request_top_k, chat_request_top_p, chat_request_prompt_truncation, chat_request_frequency_penalty, chat_request_presence_penalty, chat_request_seed, chat_request_is_echo, chat_request_tools, chat_request_tool_results, chat_request_is_force_single_step, chat_request_stop_sequences, chat_request_is_raw_prompting, chat_request_citation_quality, chat_request_safety_mode):
 
     kwargs = {}
     kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
@@ -407,6 +408,9 @@ def chat_cohere_chat_request(ctx, from_json, compartment_id, serving_mode, chat_
     if chat_request_is_stream is not None:
         _details['chatRequest']['isStream'] = chat_request_is_stream
 
+    if chat_request_stream_options is not None:
+        _details['chatRequest']['streamOptions'] = cli_util.parse_json_parameter("chat_request_stream_options", chat_request_stream_options)
+
     if chat_request_max_tokens is not None:
         _details['chatRequest']['maxTokens'] = chat_request_max_tokens
 
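``--chat-request-stream-options`` is a complex (JSON) parameter: the CLI parses it with ``cli_util.parse_json_parameter`` and places the result in ``chatRequest.streamOptions``, so it is only meaningful together with ``--chat-request-is-stream true``. A minimal invocation sketch (the model ID and the ``isIncludeUsage`` field are assumptions about the StreamOptions shape; use ``--generate-full-command-json-input`` to see the exact schema):

    # Stream a Cohere chat response, passing stream options as JSON
    oci generative-ai-inference chat-result chat-cohere-chat-request \
        --compartment-id ocid1.compartment.oc1..exampleuniqueID \
        --serving-mode '{"servingType": "ON_DEMAND", "modelId": "cohere.command-r-08-2024"}' \
        --chat-request-message "Write one sentence about Oracle Cloud." \
        --chat-request-is-stream true \
        --chat-request-stream-options '{"isIncludeUsage": true}'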
