Commit 8e0e562a authored by Eduardo Bonet

Merge branch 'fix-litellm-logprobs-client' into 'main'

fix: update litellm logprobs after upstream bugfix

See merge request gitlab-org/modelops/applied-ml/code-suggestions/ai-assist!1958
parents 48bddc4c f35388e7
Pipeline #1666806651 passed
@@ -348,13 +348,8 @@ class LiteLlmTextGenModel(TextGenModelBase):
         score = 10**5  # default high value if model doesn't provide score
         # For fireworks/qwen, use logprob of first token as score
-        # using original_response - see https://github.com/BerriAI/litellm/issues/7974
-        if self.provider == KindModelProvider.FIREWORKS and suggestion.get(
-            "_hidden_params"
-        ):
-            score = suggestion._hidden_params["original_response"]["choices"][0][
-                "logprobs"
-            ].token_logprobs[0]
+        if self.provider == KindModelProvider.FIREWORKS:
+            score = suggestion.choices[0].logprobs.token_logprobs[0]
         return TextGenModelOutput(
             text=self._extract_suggestion_text(suggestion),
......
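For context, the change above can be read outside the diff: once the upstream fix for BerriAI/litellm#7974 landed, Fireworks token logprobs are surfaced on the standard `choices[0].logprobs` object, so the `_hidden_params["original_response"]` workaround is no longer needed. Below is a minimal sketch of the resulting scoring logic, assuming a litellm-style response object; `extract_score`, `is_fireworks`, and `DEFAULT_SCORE` are illustrative names, not identifiers from the MR:

```python
from typing import Any

# Default high value used when the model doesn't provide a score.
DEFAULT_SCORE = 10**5


def extract_score(suggestion: Any, is_fireworks: bool) -> float:
    """Return the first token's logprob as the suggestion score.

    After the upstream litellm bugfix (BerriAI/litellm#7974),
    `token_logprobs` is populated on the standard response object,
    so reading `_hidden_params["original_response"]` is unnecessary.
    """
    score = DEFAULT_SCORE
    if is_fireworks:
        # Logprobs now live directly on the first choice.
        score = suggestion.choices[0].logprobs.token_logprobs[0]
    return score
```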
@@ -375,11 +375,9 @@ def mock_litellm_acompletion():
             AsyncMock(
                 message=AsyncMock(content="Test response"),
                 text="Test text completion response",
+                logprobs=AsyncMock(token_logprobs=[999]),
             ),
         ],
-        _hidden_params={
-            "original_response": {"choices": [{"logprobs": AsyncMock(token_logprobs=[999])}]}
-        },
         usage=AsyncMock(completion_tokens=999),
     )
......
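The test fixture mirrors the same shape change: `token_logprobs` now sits on the mocked choice itself rather than inside `_hidden_params`. Here is a hedged sketch of the updated fixture, assuming pytest and `unittest.mock`; the `litellm.acompletion` patch target and the surrounding conftest wiring are assumptions, since the diff only shows the mock's payload:

```python
from unittest.mock import AsyncMock, patch

import pytest


@pytest.fixture
def mock_litellm_acompletion():
    """Patch litellm.acompletion so token_logprobs sits on the choice,
    matching the post-bugfix litellm response shape."""
    with patch("litellm.acompletion") as mock_acompletion:
        mock_acompletion.return_value = AsyncMock(
            choices=[
                AsyncMock(
                    message=AsyncMock(content="Test response"),
                    text="Test text completion response",
                    logprobs=AsyncMock(token_logprobs=[999]),
                ),
            ],
            usage=AsyncMock(completion_tokens=999),
        )
        yield mock_acompletion
```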