From 2d417de1b2f9497221241b354774efa3af05e6ce Mon Sep 17 00:00:00 2001
From: Nihit <1181800+nihit@users.noreply.github.com>
Date: Fri, 4 Apr 2025 16:01:05 -0700
Subject: [PATCH] Update README to show how to use the logprobs parameter

---
 README.md | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/README.md b/README.md
index 237a949f..9a4976b4 100644
--- a/README.md
+++ b/README.md
@@ -186,6 +186,30 @@ async def async_chat_completion(messages):
 asyncio.run(async_chat_completion(messages))
 ```
 
+#### Fetching logprobs
+
+Logprobs are logarithms of token-level generation probabilities that indicate the likelihood of the generated token based on the previous tokens in the context. Logprobs allow us to estimate the model's confidence in its outputs, which can be used to decide how to optimally consume the model's output (e.g. rejecting low-confidence outputs, retrying, or ensembling model outputs, etc.).
+
+```python
+from together import Together
+
+client = Together()
+
+response = client.chat.completions.create(
+    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
+    messages=[{"role": "user", "content": "tell me about new york"}],
+    logprobs=1
+)
+
+response_logprobs = response.choices[0].logprobs
+
+print(dict(zip(response_logprobs.tokens, response_logprobs.token_logprobs)))
+# {'New': -2.384e-07, ' York': 0.0, ',': 0.0, ' also': -0.20703125, ' known': -0.20214844, ' as': -8.34465e-07, ... }
+```
+
+More details about using logprobs in Together's API can be found [here](https://docs.together.ai/docs/logprobs).
+
+
 ### Completions
 
 Completions are for code and language models shown [here](https://docs.together.ai/docs/inference-models). Below, a code model example is shown.