Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (59)
Showing 484 additions and 36 deletions
......@@ -61,13 +61,13 @@ include:
inputs:
runway_service_id: ai-gateway
image: "$CI_REGISTRY_IMAGE/model-gateway:${CI_COMMIT_SHORT_SHA}"
runway_version: v3.53.2
runway_version: v3.55.3
- project: "gitlab-com/gl-infra/platform/runway/runwayctl"
file: "ci-tasks/service-project/runway.yml"
inputs:
runway_service_id: ai-gateway-custom
image: "$SELF_HOSTED_TARGET_IMAGE"
runway_version: v3.53.2
runway_version: v3.55.3
- component: ${CI_SERVER_FQDN}/gitlab-org/components/danger-review/danger-review@2.0.0
rules:
- if: $CI_SERVER_HOST == "gitlab.com"
......
include:
# see https://gitlab.com/gitlab-com/gl-infra/common-ci-tasks/-/blob/main/oidc.md
- project: 'gitlab-com/gl-infra/common-ci-tasks'
ref: v2.56.0 # renovate:managed
ref: v2.57.3 # renovate:managed
file: 'oidc.yml'
.ingest-base:
......@@ -14,7 +14,6 @@ include:
GITLAB_DOCS_JSONL_EXPORT_PATH: "$CI_PROJECT_DIR/docs.jsonl"
GITLAB_DOCS_WEB_ROOT_URL: "https://gitlab.com/help"
needs: [build:ingest-image]
when: manual
.ingest-dev:
variables:
......
......@@ -31,7 +31,6 @@ publish-dryrun:
.docker-release:
extends: .docker
when: manual
script:
- docker pull "${TARGET_IMAGE}"
- docker tag "${TARGET_IMAGE}" "${RELEASE_VERSION}"
......
......@@ -29,6 +29,7 @@ spec:
failure_threshold: 24
liveness_probe:
path: "/monitoring/healthz"
timeout_seconds: 5
scalability:
min_instances: 1
max_instances: 100
......
......@@ -28,6 +28,7 @@ spec:
failure_threshold: 24
liveness_probe:
path: "/monitoring/healthz"
timeout_seconds: 5
scalability:
min_instances: 4
max_instances: 200
......
......@@ -63,6 +63,10 @@ X_GITLAB_FEATURE_ENABLEMENT_TYPE_HEADER = "X-Gitlab-Feature-Enablement-Type"
X_GITLAB_MODEL_GATEWAY_REQUEST_SENT_AT = "X-Gitlab-Rails-Send-Start"
X_GITLAB_LANGUAGE_SERVER_VERSION = "X-Gitlab-Language-Server-Version"
X_GITLAB_ENABLED_FEATURE_FLAGS = "x-gitlab-enabled-feature-flags"
X_GITLAB_CLIENT_TYPE = "X-Gitlab-Client-Type"
X_GITLAB_CLIENT_VERSION = "X-Gitlab-Client-Version"
X_GITLAB_CLIENT_NAME = "X-Gitlab-Client-Name"
X_GITLAB_INTERFACE = "X-Gitlab-Interface"
class _PathResolver:
......@@ -368,6 +372,10 @@ class InternalEventMiddleware:
instance_version=request.headers.get(X_GITLAB_VERSION_HEADER),
global_user_id=request.headers.get(X_GITLAB_GLOBAL_USER_ID_HEADER),
is_gitlab_team_member=request.headers.get(X_GITLAB_TEAM_MEMBER_HEADER),
client_type=request.headers.get(X_GITLAB_CLIENT_TYPE),
client_name=request.headers.get(X_GITLAB_CLIENT_NAME),
client_version=request.headers.get(X_GITLAB_CLIENT_VERSION),
interface=request.headers.get(X_GITLAB_INTERFACE),
feature_enabled_by_namespace_ids=feature_enabled_by_namespace_ids,
feature_enablement_type=request.headers.get(
X_GITLAB_FEATURE_ENABLEMENT_TYPE_HEADER
......
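
A minimal sketch (illustrative values only, not taken from this revision) of a caller supplying the new client headers so InternalEventMiddleware can attach client_type, client_name, client_version, and interface to tracked events:

import requests

# Hypothetical endpoint and header values for illustration only.
response = requests.post(
    "https://ai-gateway.example.com/v1/chat/agent",
    headers={
        "X-Gitlab-Client-Type": "ide",        # assumed value
        "X-Gitlab-Client-Name": "vscode",     # assumed value
        "X-Gitlab-Client-Version": "1.2.3",   # assumed value
        "X-Gitlab-Interface": "duo_chat",     # assumed value
    },
    json={"prompt": "Hello"},
)
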
......@@ -44,7 +44,7 @@ class PromptPayload(BaseModel):
Literal[KindModelProvider.ANTHROPIC, KindModelProvider.LITELLM]
] = None
model: Optional[KindAnthropicModel | KindLiteLlmModel] = (
KindAnthropicModel.CLAUDE_2_0
KindAnthropicModel.CLAUDE_2_1
)
params: Optional[AnthropicParams] = None
model_endpoint: Optional[str] = None
......
......@@ -63,9 +63,9 @@ def authorize_agent_request(
agent_request: AgentRequest,
internal_event_client: InternalEventsClient,
):
if current_user.can(GitLabUnitPrimitive.DUO_CHAT):
if current_user.can(GitLabUnitPrimitive.AMAZON_Q_INTEGRATION):
internal_event_client.track_event(
f"request_{GitLabUnitPrimitive.DUO_CHAT}",
f"request_{GitLabUnitPrimitive.AMAZON_Q_INTEGRATION}",
category=__name__,
)
else:
......
......@@ -306,7 +306,7 @@ def _resolve_code_generations_anthropic(
generations_anthropic_factory: Factory[CodeGenerations],
) -> CodeGenerations:
model_name = (
payload.model_name if payload.model_name else KindAnthropicModel.CLAUDE_2_0
payload.model_name if payload.model_name else KindAnthropicModel.CLAUDE_2_1
)
return generations_anthropic_factory(
......
......@@ -178,14 +178,16 @@ class ReActPromptTemplate(Runnable[ReActAgentInputs, PromptValue]):
if not isinstance(messages[-1], HumanMessage):
raise ValueError("Last message must be a human message")
messages.append(
AIMessage(
jinja2_formatter(
self.prompt_template["assistant"],
agent_scratchpad=input.agent_scratchpad,
if "assistant" in self.prompt_template:
messages.append(
AIMessage(
jinja2_formatter(
self.prompt_template["assistant"],
agent_scratchpad=input.agent_scratchpad,
)
)
)
)
return ChatPromptValue(messages=messages)
......
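
For illustration, two hypothetical prompt_template dicts (keys inferred from the diff): with this change, the trailing AIMessage carrying the agent scratchpad is only appended when an "assistant" entry is present.

# Hypothetical templates; only the presence of the "assistant" key matters here.
template_with_assistant = {
    "system": "You are GitLab Duo Chat.",
    "user": "{{question}}",
    "assistant": "{{agent_scratchpad}}",  # rendered into a trailing AIMessage
}

template_without_assistant = {
    "system": "You are GitLab Duo Chat.",
    "user": "{{question}}",
    # no "assistant" entry: ReActPromptTemplate now skips the AIMessage
}
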
......@@ -5,6 +5,10 @@ from dependency_injector import containers, providers
from ai_gateway.chat.agents import ReActAgent, TypeAgentEvent
from ai_gateway.chat.executor import GLAgentRemoteExecutor, TypeAgentFactory
from ai_gateway.chat.toolset import DuoChatToolsRegistry
from ai_gateway.integrations.amazon_q.chat import ChatAmazonQ
from ai_gateway.integrations.amazon_q.client import AmazonQClientFactory
from ai_gateway.integrations.amazon_q.message_processor import MessageProcessor
from ai_gateway.integrations.amazon_q.response_handlers import ResponseHandler
if TYPE_CHECKING:
from ai_gateway.prompts import BasePromptRegistry
......@@ -39,6 +43,27 @@ class ContainerChat(containers.DeclarativeContainer):
prompt_registry=prompts.prompt_registry,
)
# Core dependencies
message_processor = providers.Singleton(MessageProcessor)
response_handler = providers.Singleton(ResponseHandler)
# Client factory
amazon_q_client_factory = providers.Singleton(
AmazonQClientFactory, config=config.amazon_q
)
# Chat factory with validated config
amazon_q_factory = providers.Factory(
ChatAmazonQ,
amazon_q_client_factory=amazon_q_client_factory,
message_processor=message_processor,
response_handler=response_handler,
metadata=providers.Dict(user=providers.Callable(lambda: None)),
model="amazon_q",
temperature=0.7,
max_retries=3,
)
# We need to resolve the model based on model name provided in request payload
# Hence, `models._anthropic_claude` and `models._anthropic_claude_chat_factory` are only partially applied here.
anthropic_claude_factory = providers.FactoryAggregate(
......
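
A sketch of resolving the new providers, assuming the container exposes a dependency_injector Configuration for amazon_q; the module path and config values below are placeholders, not taken from this revision:

from ai_gateway.chat.container import ContainerChat  # module path assumed

container = ContainerChat()
container.config.from_dict({"amazon_q": {"region": "us-east-1"}})  # placeholder config

# Singletons (client factory, message processor, response handler) are shared;
# each Factory call builds a fresh ChatAmazonQ wired with them.
chat_model = container.amazon_q_factory()
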
......@@ -14,7 +14,7 @@ from ai_gateway.chat.agents import (
from ai_gateway.chat.base import BaseToolsRegistry
from ai_gateway.chat.tools import BaseTool
from ai_gateway.internal_events import InternalEventsClient
from ai_gateway.prompts.typing import ModelMetadata
from ai_gateway.prompts.typing import ModelMetadataType
__all__ = [
"TypeAgentFactory",
......@@ -32,10 +32,11 @@ class TypeAgentFactory(Protocol[TypeAgentEvent]):
def __call__(
self,
*,
model_metadata: ModelMetadata,
model_metadata: ModelMetadataType,
) -> Runnable[TypeAgentInputs, TypeAgentEvent]: ...
# pylint: disable=attribute-defined-outside-init
class GLAgentRemoteExecutor(Generic[TypeAgentInputs, TypeAgentEvent]):
def __init__(
self,
......@@ -48,6 +49,7 @@ class GLAgentRemoteExecutor(Generic[TypeAgentInputs, TypeAgentEvent]):
self.tools_registry = tools_registry
self.internal_event_client = internal_event_client
self._tools: list[BaseTool] | None = None
self._user: StarletteUser | None = None
@property
def tools(self) -> list[BaseTool]:
......@@ -66,10 +68,13 @@ class GLAgentRemoteExecutor(Generic[TypeAgentInputs, TypeAgentEvent]):
# Reason: https://github.com/tiangolo/fastapi/discussions/10138
if not user.is_debug:
self._tools = self.tools_registry.get_on_behalf(user, gl_version)
self._user = user
async def stream(self, *, inputs: TypeAgentInputs) -> AsyncIterator[TypeAgentEvent]:
inputs.tools = self.tools
agent: ReActAgent = self.agent_factory(model_metadata=inputs.model_metadata)
agent: ReActAgent = self.agent_factory(
user=self._user, model_metadata=inputs.model_metadata
)
tools_by_name = self.tools_by_name
......
......@@ -10,7 +10,6 @@ from ai_gateway.chat.tools.gitlab import (
MergeRequestReader,
SelfHostedGitlabDocumentation,
)
from ai_gateway.feature_flags import FeatureFlag, is_feature_enabled
__all__ = ["DuoChatToolsRegistry"]
......@@ -29,6 +28,7 @@ class DuoChatToolsRegistry(BaseToolsRegistry):
EpicReader(),
IssueReader(),
MergeRequestReader(),
CommitReader(),
]
if self.self_hosted_documentation_enabled:
......@@ -36,9 +36,6 @@ class DuoChatToolsRegistry(BaseToolsRegistry):
else:
tools.append(GitlabDocumentation())
if is_feature_enabled(FeatureFlag.AI_COMMIT_READER_FOR_CHAT):
tools.append(CommitReader())
return tools
def get_on_behalf(self, user: StarletteUser, gl_version: str) -> list[BaseTool]:
......
......@@ -67,7 +67,6 @@ USE_CASES_MODELS_MAP = {
KindAmazonQModel.AMAZON_Q,
},
KindUseCase.CODE_GENERATIONS: {
KindAnthropicModel.CLAUDE_2_0,
KindAnthropicModel.CLAUDE_2_1,
KindVertexTextModel.CODE_BISON_002,
KindAnthropicModel.CLAUDE_3_SONNET,
......@@ -91,16 +90,12 @@ USE_CASES_MODELS_MAP = {
SAAS_PROMPT_MODEL_MAP = {
"^1.0.0": {
"model_provider": ModelProvider.ANTHROPIC,
"model_version": KindAnthropicModel.CLAUDE_3_5_SONNET,
"model_version": KindAnthropicModel.CLAUDE_3_5_SONNET_V2,
},
"1.0.0": {
"model_provider": ModelProvider.ANTHROPIC,
"model_version": KindAnthropicModel.CLAUDE_3_5_SONNET,
},
"1.0.1-dev": {
"model_provider": ModelProvider.ANTHROPIC,
"model_version": KindAnthropicModel.CLAUDE_3_5_SONNET_V2,
},
"2.0.0": {
"model_provider": ModelProvider.VERTEX_AI,
"model_version": KindAnthropicModel.CLAUDE_3_5_SONNET,
......
......@@ -101,10 +101,10 @@ class ContainerCodeGenerations(containers.DeclarativeContainer):
snowplow_instrumentator=snowplow_instrumentator,
)
# Default use case with claude.2.0
# Default use case with claude.2.1
anthropic_default = providers.Factory(
anthropic_factory,
model__name=KindAnthropicModel.CLAUDE_2_0,
model__name=KindAnthropicModel.CLAUDE_2_1,
)
......@@ -137,7 +137,7 @@ class ContainerCodeCompletions(containers.DeclarativeContainer):
overrides={
PostProcessorOperation.FIX_END_BLOCK_ERRORS: PostProcessorOperation.FIX_END_BLOCK_ERRORS_LEGACY,
},
exclude=config.excl_post_proc,
exclude=config.excl_post_process,
).provider,
snowplow_instrumentator=snowplow_instrumentator,
)
......@@ -170,7 +170,7 @@ class ContainerCodeCompletions(containers.DeclarativeContainer):
),
post_processor=providers.Factory(
PostProcessorCompletions,
exclude=config.excl_post_proc,
exclude=config.excl_post_process,
extras=[
PostProcessorOperation.FILTER_SCORE,
],
......
......@@ -157,7 +157,7 @@ class ConfigAmazonQ(BaseModel):
class ConfigFeatureFlags(BaseModel):
disallowed_flags: dict[str, Set[str]] = {}
excl_post_proc: list[str] = []
excl_post_process: list[str] = []
fireworks_qwen_score_threshold: float = -9999.0
......
......@@ -68,6 +68,7 @@ class ContainerApplication(containers.DeclarativeContainer):
pkg_models_v2 = providers.Container(
ContainerModelsV2,
config=config,
integrations=integrations,
)
pkg_prompts = providers.Container(
ContainerPrompts,
......
......@@ -9,7 +9,6 @@ from enum import StrEnum
class FeatureFlag(StrEnum):
# Definition: https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/feature_flags/ops/expanded_ai_logging.yml
EXPANDED_AI_LOGGING = "expanded_ai_logging"
AI_COMMIT_READER_FOR_CHAT = "ai_commit_reader_for_chat"
ENABLE_ANTHROPIC_PROMPT_CACHING = "enable_anthropic_prompt_caching"
......
"""
Amazon Q Chat Integration Module.
Provides the main chat interface for interacting with Amazon Q, handling message generation,
streaming responses, and error management.
Example Usage:
# Initialize the chat model
amazon_q = ChatAmazonQ(
amazon_q_client_factory=AmazonQClientFactory(),
model="amazon_q"
)
# Generate a response
messages = [
SystemMessage(content="You are a helpful assistant"),
HumanMessage(content="Hello, how can you help me?")
]
response = amazon_q.generate(messages)
# Stream responses
for chunk in amazon_q.stream(messages):
print(chunk.content)
"""
import os
from dataclasses import field
from typing import Any, Dict, Iterator, List, Optional, cast
from botocore.exceptions import ClientError
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from requests.exceptions import Timeout
from ai_gateway.api.auth_utils import StarletteUser
from ai_gateway.integrations.amazon_q.error_handers import AWSErrorHandler
from ai_gateway.integrations.amazon_q.message_processor import (
MessageProcessor,
ProcessedMessage,
)
from ai_gateway.integrations.amazon_q.response_handlers import (
ResponseHandler,
StreamEvent,
)
class ChatAmazonQ(BaseChatModel):
"""
Main chat model class for Amazon Q integration.
Handles message generation, streaming responses, and error management.
The class is organized into these main sections:
1. Initialization and Properties
2. Message Generation and Processing
3. Streaming Functionality
4. Error Handling
5. Client Management
"""
# Section 1: Initialization and Properties
amazon_q_client_factory: Any
model: str = field(default="amazon_q")
message_processor: MessageProcessor = field(default_factory=MessageProcessor)
response_handler: ResponseHandler = field(default_factory=ResponseHandler)
def __post_init__(self) -> None:
"""
Post-initialization setup.
Called after dataclass initialization.
"""
self.metadata: Dict[str, Any] = {}
super().__init__()
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get identifying parameters for the model."""
return {"model": self.model}
@property
def _llm_type(self) -> str:
"""Get the LLM type identifier."""
return "amazon_q"
# Section 2: Message Generation and Processing
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""
Generate a response for the given messages.
Args:
messages: List of input messages to process
stop: Optional stop sequences for generation
run_manager: Optional callback manager
kwargs: Additional keyword arguments
Returns:
ChatResult: Generated response wrapped in a ChatResult object
"""
message: str = self._build_response(messages=messages)
return self._create_chat_result(message)
def _create_chat_result(self, message: str) -> ChatResult:
"""
Create a ChatResult object from a message.
Args:
message: The message to wrap in a ChatResult
Returns:
ChatResult: Formatted chat result with the message
"""
return ChatResult(
generations=[self._create_chat_generation(message)],
llm_output=self._create_llm_output(),
)
def _create_chat_generation(self, message: str) -> ChatGeneration:
"""
Create a ChatGeneration object from a message.
Args:
message: The message content
Returns:
ChatGeneration: Wrapped message in a ChatGeneration object
"""
return ChatGeneration(message=AIMessage(content=message))
def _create_llm_output(self) -> Dict[str, Any]:
"""
Create the LLM output dictionary with metadata.
Returns:
Dict[str, Any]: Dictionary containing token usage and model information
"""
return {"token_usage": 100, "model": "amazon_q"}
def _build_response(self, messages: List[BaseMessage]):
"""
Build a response from the given messages.
Args:
messages: List of messages to process
Returns:
str: The built response from Amazon Q
"""
current_user: StarletteUser = self._get_current_user()
q_client: Any = self._get_client(current_user)
processed_message: ProcessedMessage = self._process_messages(
messages, current_user
)
return self._send_chat_message(q_client, processed_message)
def _process_messages(
self, messages: List[BaseMessage], current_user: StarletteUser
) -> ProcessedMessage:
"""
Process the input messages for the current user.
Args:
messages: List of messages to process
current_user: The current user
Returns:
ProcessedMessage: Processed message ready for sending
"""
return self.message_processor.process_messages(messages, current_user)
def _create_chat_message_params(
self, processed_message: ProcessedMessage
) -> Dict[str, Any]:
"""
Create parameters for sending a chat message.
Args:
processed_message: The processed message
Returns:
Dict[str, Any]: Parameters for the chat message
"""
return {
"message": processed_message.content,
"conversation_id": processed_message.conversation_id,
"history": processed_message.history,
}
# Section 3: Streaming Functionality
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
"""
Stream responses for the given messages.
Args:
messages: List of input messages to process
stop: Optional stop sequences for generation
run_manager: Optional callback manager
kwargs: Additional keyword arguments
Yields:
ChatGenerationChunk: Chunks of the generated response
"""
try:
response: Dict[str, Any] = self._build_response(messages=messages)
yield from self._handle_stream(response["responseStream"])
except Exception as e:
yield from self._handle_stream_error(e)
def _handle_stream(
self, stream: Iterator[Dict[str, Any]]
) -> Iterator[ChatGenerationChunk]:
"""
Handle the streaming response from Amazon Q.
Args:
stream: Iterator of response events
Yields:
ChatGenerationChunk: Response content chunks
"""
try:
yield from self._process_stream_events(stream)
finally:
self._close_stream(stream)
def _process_stream_events(
self, stream: Iterator[Dict[str, Any]]
) -> Iterator[ChatGenerationChunk]:
"""
Process individual events from the response stream.
Args:
stream: Iterator of response events
Yields:
ChatGenerationChunk: Processed content chunks
"""
for event in stream:
if not isinstance(event, dict):
yield self.response_handler.create_error_chunk(
"Invalid event format: not a dictionary"
)
continue
# Cast the validated dictionary to StreamEvent
stream_event = cast(StreamEvent, event)
response = self.response_handler.process_stream_event(stream_event)
if response.error:
yield self.response_handler.create_error_chunk(response.error)
else:
yield self.response_handler.create_content_chunk(response.content)
def _close_stream(self, stream: Iterator[Dict[str, Any]]) -> None:
"""
Safely close the response stream.
Args:
stream: The stream to close
"""
if hasattr(stream, "close") and callable(getattr(stream, "close")):
try:
stream.close()
except Exception:
pass
# Section 4: Error Handling
def _handle_stream_error(self, error: Exception) -> Iterator[ChatGenerationChunk]:
"""
Handle different types of streaming errors.
Args:
error: The exception that occurred
Yields:
ChatGenerationChunk: Error message chunks
"""
if isinstance(error, Timeout):
yield self._create_timeout_error()
elif isinstance(error, ClientError):
yield self._create_aws_error(error)
else:
yield self._create_generic_error(error)
def _create_timeout_error(self) -> ChatGenerationChunk:
"""
Create an error chunk for timeout errors.
Returns:
ChatGenerationChunk: Timeout error message
"""
return self.response_handler.create_error_chunk(
"Connection timed out while receiving data from Amazon Q."
)
def _create_aws_error(self, error: ClientError) -> ChatGenerationChunk:
"""
Create an error chunk for AWS-specific errors.
Args:
error: The AWS client error
Returns:
ChatGenerationChunk: AWS error message
"""
error_config = AWSErrorHandler.handle_client_error(error)
return self.response_handler.create_error_chunk(
f"({error_config.code}): {error_config.message}"
)
def _create_generic_error(self, error: Exception) -> ChatGenerationChunk:
"""
Create an error chunk for general exceptions.
Args:
error: The exception that occurred
Returns:
ChatGenerationChunk: Generic error message
"""
return self.response_handler.create_error_chunk(str(error))
# Section 5: Client Management
def _get_current_user(self) -> StarletteUser:
"""
Get the current user from metadata.
Returns:
StarletteUser: The current user making the request
"""
return self.metadata["user"]
def _get_client(self, current_user: StarletteUser) -> Any:
"""
Get an Amazon Q client for the current user.
Args:
current_user: The current user
Returns:
Any: Configured Amazon Q client
"""
role_arn: Optional[str] = self._get_role_arn()
return self._create_client(current_user, role_arn)
def _get_role_arn(self) -> Optional[str]:
"""
Get the AWS role ARN from environment variables.
Returns:
Optional[str]: The role ARN if configured
"""
return os.environ.get("AWS_ROLE_ARN")
def _create_client(
self, current_user: StarletteUser, role_arn: Optional[str]
) -> Any:
"""
Create an Amazon Q client.
Args:
current_user: The current user
role_arn: Optional role ARN for AWS authentication
Returns:
Any: Configured Amazon Q client
"""
return self.amazon_q_client_factory.get_client(
current_user=current_user,
role_arn=role_arn,
)
def _send_chat_message(
self, q_client: Any, processed_message: ProcessedMessage
) -> str:
"""
Send a chat message to Amazon Q.
Args:
q_client: The Amazon Q client
processed_message: The processed message to send
Returns:
str: Response from Amazon Q
"""
return q_client.send_chat_message(
self._create_chat_message_params(processed_message)
)
......@@ -136,6 +136,14 @@ class AmazonQClient:
raise
@raise_aws_errors
def send_chat_message(self, payload):
try:
return self._send_message(payload)
except ClientError:
raise
@raise_aws_errors
def _create_o_auth_app_connection(self, **params):
self.client.create_o_auth_app_connection(**params)
......@@ -155,7 +163,18 @@ class AmazonQClient:
event=payload,
)
def _send_message(self, payload):
print("DEBUG [AmazonQClient]: _send_message payload", payload)
return self.client.send_message(
message=payload["message"], conversationId=payload["conversation_id"]
)
def _retry_send_event(self, error, code, payload):
self._is_retry(error, code)
return self._send_event(payload)
def _is_retry(self, error, code):
match str(error.response.get("reason")):
case AccessDeniedExceptionReason.GITLAB_EXPIRED_IDENTITY:
self.client.create_auth_grant(code=code)
......@@ -166,5 +185,3 @@ class AmazonQClient:
status_code=status.HTTP_403_FORBIDDEN,
detail=str(error),
)
return self._send_event(payload)