Internal server error on model reflection post processing
Problem to solve
The following exception was detected in the production logs.
Traceback (most recent call last):
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/anyio/streams/memory.py", line 98, in receive
return self.receive_nowait()
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/anyio/streams/memory.py", line 93, in receive_nowait
raise WouldBlock
anyio.WouldBlock
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/middleware/base.py", line 43, in call_next
message = await recv_stream.receive()
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/anyio/streams/memory.py", line 118, in receive
raise EndOfStream
anyio.EndOfStream
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/app/ai_gateway/api/middleware.py", line 107, in dispatch
response = await call_next(request)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/middleware/base.py", line 46, in call_next
raise app_exc
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/middleware/base.py", line 36, in coro
await self.app(scope, request.receive, send_stream.send)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/middleware/authentication.py", line 48, in __call__
await self.app(scope, receive, send)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/middleware/base.py", line 68, in __call__
response = await self.dispatch_func(request, call_next)
File "/app/ai_gateway/api/middleware.py", line 274, in dispatch
return await call_next(request)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/middleware/base.py", line 46, in call_next
raise app_exc
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/middleware/base.py", line 36, in coro
await self.app(scope, request.receive, send_stream.send)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/exceptions.py", line 93, in __call__
raise exc
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/exceptions.py", line 82, in __call__
await self.app(scope, receive, sender)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/fastapi/middleware/asyncexitstack.py", line 21, in __call__
raise e
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/fastapi/middleware/asyncexitstack.py", line 18, in __call__
await self.app(scope, receive, send)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/routing.py", line 670, in __call__
await route.handle(scope, receive, send)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/routing.py", line 266, in handle
await self.app(scope, receive, send)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/starlette/routing.py", line 65, in app
response = await func(request)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/fastapi/routing.py", line 231, in app
raw_response = await run_endpoint_function(
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/fastapi/routing.py", line 160, in run_endpoint_function
return await dependant.call(**values)
File "/opt/venv/ai-gateway-9TtSrW0h-py3.9/lib/python3.9/site-packages/dependency_injector/wiring.py", line 994, in _patched
return await _async_inject(
File "src/dependency_injector/_cwiring.pyx", line 66, in _async_inject
File "/app/ai_gateway/api/v2/endpoints/code.py", line 117, in completions
suggestion = await code_completions(
File "/app/ai_gateway/code_suggestions/processing/base.py", line 79, in generate
return await self._generate(
File "/app/ai_gateway/code_suggestions/processing/completions.py", line 225, in _generate
completion = self.post_processor_factory(
File "/app/ai_gateway/code_suggestions/processing/post/completions.py", line 25, in process
completion = clean_model_reflection(self.code_context, completion)
File "/app/ai_gateway/code_suggestions/processing/post/ops.py", line 90, in clean_model_reflection
or _is_large_group(group, target_lines, **kwargs)
File "/app/ai_gateway/code_suggestions/processing/post/ops.py", line 62, in _is_large_group
and not _with_special_characters(counter, min_special_chars)
File "/app/ai_gateway/code_suggestions/processing/post/ops.py", line 43, in _with_special_characters
return (special_characters_count / total_count) >= min_p
ZeroDivisionError: division by zero
See full log event.
It is noted that json.jsonPayload.model_output_length_stripped
is 0,
so it is likely that the model returned a suggestion containing only whitespace characters.
Proposal
Ensure that model reflection post-processing is guarded against suggestions that contain only whitespace characters (i.e. handle the case where the stripped model output is empty before computing the special-character ratio, to avoid division by zero).
Further details
Links / references
Edited by Tan Le