
Commit fbb07c9

Merge pull request #753 from oracle/release_2024-03-05
Releasing version 3.37.12
2 parents 4ac38a9 + 30a7a88

File tree

16 files changed: +433, -143 lines

CHANGELOG.rst

Lines changed: 41 additions & 0 deletions
@@ -6,6 +6,47 @@ All notable changes to this project will be documented in this file.
 
 The format is based on `Keep a Changelog <http://keepachangelog.com/>`__.
 
+3.37.12 - 2024-03-05
+--------------------
+Added
+~~~~~
+* Support for server streaming events in the Generative AI Inference Service when `inference-request-is-stream` is set to true
+
+  * ``oci generative-ai-inference generate-text-result generate-text-cohere-llm-inference-request --inference-request-is-stream``
+  * ``oci generative-ai-inference generate-text-result generate-text-llama-llm-inference-request --inference-request-is-stream``
+
+* Speech service
+
+  * Support for Whisper models in creation of transcription jobs
+
+    * ``oci speech transcription-job create --model-details``
+
+  * Support for the Delete Job API for transcription jobs
+
+    * ``oci speech transcription-job delete --transcription-job-id``
+
+* Operations Insights service
+
+  * Support for updating host insights
+
+    * ``oci opsi host-insights update-pe-comanaged-host``
+
+  * Support to receive insights from resources in child compartments for news reports
+
+    * ``oci opsi news-reports create --are-child-compartments-included, --day-of-week``
+
+  * Support to update more parameters for news reports
+
+    * ``oci opsi news-reports update --are-child-compartments-included, --day-of-week, --description, --name``
+
+  * Support to choose the day of the week the report is received for news reports
+
+    * ``oci opsi news-reports update --are-child-compartments-included, --day-of-week, --description, --name``
+
+Changed
+~~~~~~~
+* Upgraded the cryptography version to (>=3.2.1,<43.0.0) and pyOpenSSL version to (>=17.5.0,<25.0.0)
+
 3.37.11 - 2024-02-27
 --------------------
 Added
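
For context, a minimal sketch of what the new streaming flag corresponds to in the Python SDK pinned by this release (oci==2.124.0). The client and model class names follow the CLI command names above; the compartment OCID, model id, and the events() iterator on the streamed response are assumptions rather than anything shown in this diff.

# Hedged sketch: stream generated text via the Python SDK (oci==2.124.0).
# Class names inferred from the CLI command names above; the OCIDs, model id,
# and SSE events() iterator are assumptions.
import oci
from oci.generative_ai_inference import GenerativeAiInferenceClient
from oci.generative_ai_inference.models import (
    CohereLlmInferenceRequest, GenerateTextDetails, OnDemandServingMode)

config = oci.config.from_file()  # default ~/.oci/config profile
client = GenerativeAiInferenceClient(config)

details = GenerateTextDetails(
    compartment_id="ocid1.compartment.oc1..example",          # hypothetical OCID
    serving_mode=OnDemandServingMode(model_id="cohere.command"),
    inference_request=CohereLlmInferenceRequest(
        prompt="Hello", is_stream=True))                      # server-streamed events

response = client.generate_text(details)
for event in response.data.events():                          # assumed SSE iterator
    print(event.data)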

requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ Jinja2==3.0.3
 jmespath==0.10.0
 ndg-httpsclient==0.4.2
 mock==2.0.0
-oci==2.123.0
+oci==2.124.0
 packaging==20.2
 pluggy==0.13.0
 py==1.11.0

services/ai_speech/src/oci_cli_ai_service_speech/generated/aiservicespeech_cli.py

Lines changed: 64 additions & 0 deletions
@@ -373,6 +373,70 @@ def create_transcription_job_object_list_inline_input_location(ctx, from_json, w
     cli_util.render_response(result, ctx)
 
 
+@transcription_job_group.command(name=cli_util.override('speech.delete_transcription_job.command_name', 'delete'), help=u"""The Delete API cleans up the job, its tasks, and the related metadata. However, the generated transcriptions in the customer tenancy are not deleted. \n[Command Reference](deleteTranscriptionJob)""")
+@cli_util.option('--transcription-job-id', required=True, help=u"""Unique Transcription Job identifier.""")
+@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
+@cli_util.confirm_delete_option
+@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", "CANCELING", "CANCELED"]), multiple=True, help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. Multiple states can be specified, returning on the first state. For example, --wait-for-state SUCCEEDED --wait-for-state FAILED would return on whichever lifecycle state is reached first. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
+@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
+@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
+@json_skeleton_utils.get_cli_json_input_option({})
+@cli_util.help_option
+@click.pass_context
+@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
+@cli_util.wrap_exceptions
+def delete_transcription_job(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, transcription_job_id, if_match):
+
+    if isinstance(transcription_job_id, six.string_types) and len(transcription_job_id.strip()) == 0:
+        raise click.UsageError('Parameter --transcription-job-id cannot be whitespace or empty string')
+
+    kwargs = {}
+    if if_match is not None:
+        kwargs['if_match'] = if_match
+    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
+    client = cli_util.build_client('ai_speech', 'ai_service_speech', ctx)
+    result = client.delete_transcription_job(
+        transcription_job_id=transcription_job_id,
+        **kwargs
+    )
+    if wait_for_state:
+
+        if hasattr(client, 'get_transcription_job') and callable(getattr(client, 'get_transcription_job')):
+            try:
+                wait_period_kwargs = {}
+                if max_wait_seconds is not None:
+                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
+                if wait_interval_seconds is not None:
+                    wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
+
+                click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
+                oci.wait_until(client, client.get_transcription_job(transcription_job_id), 'lifecycle_state', wait_for_state, succeed_on_not_found=True, **wait_period_kwargs)
+            except oci.exceptions.ServiceError as e:
+                # We make an initial service call so we can pass the result to oci.wait_until(), however if we are waiting on the
+                # outcome of a delete operation it is possible that the resource is already gone and so the initial service call
+                # will result in an exception that reflects a HTTP 404. In this case, we can exit with success (rather than raising
+                # the exception) since this would have been the behaviour in the waiter anyway (as for delete we provide the argument
+                # succeed_on_not_found=True to the waiter).
+                #
+                # Any non-404 should still result in the exception being thrown.
+                if e.status == 404:
+                    pass
+                else:
+                    raise
+            except oci.exceptions.MaximumWaitTimeExceeded as e:
+                # If we fail, we should show an error, but we should still provide the information to the customer
+                click.echo('Failed to wait until the resource entered the specified state. Please retrieve the resource to find its current state', file=sys.stderr)
+                cli_util.render_response(result, ctx)
+                sys.exit(2)
+            except Exception:
+                click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
+                cli_util.render_response(result, ctx)
+                raise
+        else:
+            click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
+    cli_util.render_response(result, ctx)
+
+
 @transcription_job_group.command(name=cli_util.override('speech.get_transcription_job.command_name', 'get'), help=u"""Gets a Transcription Job by identifier \n[Command Reference](getTranscriptionJob)""")
 @cli_util.option('--transcription-job-id', required=True, help=u"""Unique Transcription Job identifier.""")
 @json_skeleton_utils.get_cli_json_input_option({})
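
A note on the waiter logic above: the initial get_transcription_job call can return a 404 once the delete has gone through, so the generated code treats 404 as success (succeed_on_not_found=True). Below is a minimal sketch of the same delete-and-wait flow against the Python SDK directly; the job OCID and the CANCELED target state are placeholders, not taken from this diff.

# Hedged sketch: delete a transcription job and wait, mirroring the generated
# CLI logic above. The job OCID and target state are placeholders.
import oci

config = oci.config.from_file()
client = oci.ai_speech.AIServiceSpeechClient(config)

job_id = "ocid1.aispeechtranscriptionjob.oc1..example"  # hypothetical OCID
client.delete_transcription_job(transcription_job_id=job_id)

try:
    # As in the generated code: a 404 here just means the job is already gone.
    oci.wait_until(client, client.get_transcription_job(job_id),
                   'lifecycle_state', 'CANCELED', succeed_on_not_found=True)
except oci.exceptions.ServiceError as e:
    if e.status != 404:
        raise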

services/ai_speech/tests/util/generated/command_to_api.py

Lines changed: 1 addition & 0 deletions
@@ -6,6 +6,7 @@
     "speech.cancel_transcription_task": "oci.ai_speech.AIServiceSpeechClient.cancel_transcription_task",
     "speech.change_transcription_job_compartment": "oci.ai_speech.AIServiceSpeechClient.change_transcription_job_compartment",
     "speech.create_transcription_job": "oci.ai_speech.AIServiceSpeechClient.create_transcription_job",
+    "speech.delete_transcription_job": "oci.ai_speech.AIServiceSpeechClient.delete_transcription_job",
     "speech.get_transcription_job": "oci.ai_speech.AIServiceSpeechClient.get_transcription_job",
     "speech.get_transcription_task": "oci.ai_speech.AIServiceSpeechClient.get_transcription_task",
     "speech.list_transcription_jobs": "oci.ai_speech.AIServiceSpeechClient.list_transcription_jobs",

services/container_instances/src/oci_cli_container_instance/generated/containerinstance_cli.py

Lines changed: 2 additions & 2 deletions
@@ -808,10 +808,10 @@ def restart_container_instance(ctx, from_json, wait_for_state, max_wait_seconds,
     cli_util.render_response(result, ctx)
 
 
-@container_group.command(name=cli_util.override('container_instances.retrieve_logs.command_name', 'retrieve-logs'), help=u"""Retrieves recent logs from the specified container. The most recent 256 KB of logs are returned. \n[Command Reference](retrieveLogs)""")
+@container_group.command(name=cli_util.override('container_instances.retrieve_logs.command_name', 'retrieve-logs'), help=u"""Returns the most recent 256 KB of logs from the specified container. \n[Command Reference](retrieveLogs)""")
 @cli_util.option('--container-id', required=True, help=u"""The [OCID] of the container.""")
 @cli_util.option('--file', type=click.File(mode='wb'), required=True, help="The name of the file that will receive the response data, or '-' to write to STDOUT.")
-@cli_util.option('--is-previous', type=click.BOOL, help=u"""Returns the logs for the previous run of the container in a pod if the pod exists. If the container fails for some reason, this parameter is useful to determine the root cause of the failure.""")
+@cli_util.option('--is-previous', type=click.BOOL, help=u"""Returns the logs for the previous run of the container in a pod. If the container fails, this parameter can help you to determine the root cause of the failure.""")
 @json_skeleton_utils.get_cli_json_input_option({})
 @cli_util.help_option
 @click.pass_context
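
For reference, a minimal sketch of the same retrieval through the Python SDK. The client and method names match the conventions used elsewhere in this release; the container OCID and the binary-response handling (.content) are assumptions.

# Hedged sketch: save the most recent 256 KB of container logs to a file.
# The container OCID and binary-response handling are assumptions.
import oci

config = oci.config.from_file()
client = oci.container_instances.ContainerInstanceClient(config)

container_id = "ocid1.computecontainer.oc1..example"  # hypothetical OCID
response = client.retrieve_logs(container_id, is_previous=False)

with open("container.log", "wb") as f:
    f.write(response.data.content)  # assumed raw log payload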

services/generative_ai_inference/src/oci_cli_generative_ai_inference/generative_ai_inference_cli_extended.py

Lines changed: 0 additions & 24 deletions
@@ -20,27 +20,3 @@
 # EmbedText API: embed_text_on_demand_serving_mode, embed_text_dedicated_serving_mode
 generativeaiinference_cli.embed_text_result_group.commands.pop(generativeaiinference_cli.embed_text_on_demand_serving_mode.name)
 generativeaiinference_cli.embed_text_result_group.commands.pop(generativeaiinference_cli.embed_text_dedicated_serving_mode.name)
-
-
-@cli_util.copy_params_from_generated_command(generativeaiinference_cli.generate_text_cohere_llm_inference_request, params_to_exclude=['inference_request_is_stream'])
-@generativeaiinference_cli.generate_text_result_group.command(name='generate-text-cohere-llm-inference-request', help=generativeaiinference_cli.generate_text_cohere_llm_inference_request.help)
-@click.pass_context
-@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'inference-request-stop-sequences': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'GenerateTextResult'})
-@cli_util.wrap_exceptions
-def generate_text_cohere_llm_inference_request_extended(ctx, **kwargs):
-
-    kwargs['inference_request_is_stream'] = False
-
-    ctx.invoke(generativeaiinference_cli.generate_text_cohere_llm_inference_request, **kwargs)
-
-
-@cli_util.copy_params_from_generated_command(generativeaiinference_cli.generate_text_llama_llm_inference_request, params_to_exclude=['inference_request_is_stream'])
-@generativeaiinference_cli.generate_text_result_group.command(name='generate-text-llama-llm-inference-request', help=generativeaiinference_cli.generate_text_llama_llm_inference_request.help)
-@click.pass_context
-@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'serving-mode': {'module': 'generative_ai_inference', 'class': 'ServingMode'}, 'inference-request-stop': {'module': 'generative_ai_inference', 'class': 'list[string]'}}, output_type={'module': 'generative_ai_inference', 'class': 'GenerateTextResult'})
-@cli_util.wrap_exceptions
-def generate_text_llama_llm_inference_request_extended(ctx, **kwargs):
-
-    kwargs['inference_request_is_stream'] = False
-
-    ctx.invoke(generativeaiinference_cli.generate_text_llama_llm_inference_request, **kwargs)
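
These overrides existed only to pin inference_request_is_stream to False while streaming was unsupported; with streaming now available (see the CHANGELOG above), the generated commands are exposed as-is. The removed code follows a generic click pattern, re-registering a generated command and delegating with ctx.invoke; below is a minimal, hypothetical sketch of that pattern with made-up command names.

# Hypothetical sketch of the override pattern used by the removed code:
# re-register a generated command, pin one parameter, delegate via ctx.invoke.
import click

@click.group()
def cli():
    pass

@cli.command(name='generated-cmd')
@click.option('--is-stream', type=click.BOOL, default=None)
@click.option('--prompt')
def generated_cmd(is_stream, prompt):
    click.echo('is_stream={} prompt={}'.format(is_stream, prompt))

@cli.command(name='wrapped-cmd')
@click.option('--prompt')
@click.pass_context
def wrapped_cmd(ctx, prompt):
    # Pin the excluded parameter, then delegate to the generated command.
    ctx.invoke(generated_cmd, is_stream=False, prompt=prompt)

if __name__ == '__main__':
    cli()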
