Commit 3822cfb1 authored by Nicolas Dular

Do not store chat messages by default

We no longer want to store and show AI messages in the chat unless it is
explicitly enabled by the feature. It is now only enabled for the `chat`
AI action.
We do this by setting `skip_cache = true` by default.

It also fixes a bug where `skip_cache` was not passed along properly to
the `GraphqlSubscriptionResponseService`.

Changelog: fixed
EE: true
parent b41e0d45
Merge request !127511: Do not store chat messages by default
Showing 25 additions and 18 deletions
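Conceptually, the change works like the minimal Ruby sketch below. This is not the actual GitLab code; `store_message!` and `broadcast_ai_response` are stand-in names used only to illustrate how the new `skip_cache` default is meant to behave.

```ruby
# Minimal sketch of the intended behavior, not the GitLab implementation.
# `store_message!` and `broadcast_ai_response` are hypothetical stand-ins.

def store_message!(data)
  puts "persisting chat message: #{data[:response_body]}"
end

def broadcast_ai_response(data, options)
  # After this change, options default to skip_cache: true, so a response is
  # only persisted when the caller explicitly opts in with skip_cache: false,
  # which now only the :chat AI action does.
  store_message!(data) unless options.fetch(:skip_cache, true)

  data.slice(:request_id, :errors, :role).merge(content: data[:response_body])
end

# The :chat action opts back in to storing messages...
broadcast_ai_response({ response_body: "Hi!", request_id: "uuid" }, { skip_cache: false })
# ...while every other AI action keeps the new default and skips the cache.
broadcast_ai_response({ response_body: "Summary", request_id: "uuid" }, { skip_cache: true })
```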
@@ -49,7 +49,8 @@ def worker_perform(user, resource, action_name, options)
   resource_id: resource&.id,
   resource_class: resource&.class&.name,
   request_id: request_id,
-  action_name: action_name
+  action_name: action_name,
+  options: options
 )
 if options[:sync] == true
@@ -5,7 +5,7 @@ class ChatService < BaseService
 private
 def perform
-  worker_perform(user, resource, :chat, options)
+  worker_perform(user, resource, :chat, options.merge(skip_cache: false))
 end
 def valid?
@@ -27,7 +27,7 @@ def initialize(user, resource, method, options = {})
 def execute
   return error('Unknown method') unless METHODS.key?(method)
-  result = METHODS[method].new(user, resource, options).execute
+  result = METHODS[method].new(user, resource, options.merge(skip_cache: true)).execute
   track_snowplow_event(result)
   return success(result.payload) if result.success?
@@ -28,7 +28,7 @@ def perform(user_id, resource_id, resource_class, ai_action_name, options = {})
 resource = find_resource(resource_id, resource_class)
 return if resource && !user.can?("read_#{resource.to_ability_name}", resource)
-params = options.extract!(:request_id, :internal_request)
+params = options.extract!(:request_id, :internal_request, :skip_cache)
 ai_completion = ::Gitlab::Llm::CompletionsFactory.completion(ai_action_name.to_sym, params)
 logger.debug(message: "Getting Completion Service from factory", class_name: ai_completion.class.name)
@@ -14,7 +14,7 @@ def initialize(ai_prompt_class, params = {})
 attr_reader :ai_prompt_class, :params
 def response_options
-  params.slice(:request_id, :internal_request)
+  params.slice(:request_id, :internal_request, :skip_cache)
 end
 end
 end
@@ -26,7 +26,8 @@ def execute
 logger.debug(
   message: "Broadcasting AI response",
-  data: data
+  data: data,
+  options: options
 )
 response_data = data.slice(:request_id, :errors, :role).merge(content: data[:response_body])
@@ -10,7 +10,7 @@ class ExplainVulnerability < Gitlab::Llm::Completions::Base
 def execute(user, vulnerability, options)
   unless vertex_ai?(vulnerability)
     return ::Gitlab::Llm::OpenAi::Completions::ExplainVulnerability
-      .new(ai_prompt_class)
+      .new(ai_prompt_class, params)
       .execute(user, vulnerability, options)
   end
@@ -30,7 +30,11 @@
 it 'falls back to the OpenAI implementation' do
   options = {}
-  allow_next_instance_of(::Gitlab::Llm::OpenAi::Completions::ExplainVulnerability) do |completion|
+  expect_next_instance_of(
+    ::Gitlab::Llm::OpenAi::Completions::ExplainVulnerability,
+    prompt_class,
+    { request_id: 'uuid' }
+  ) do |completion|
     expect(completion).to receive(:execute).with(user, vulnerability, options)
   end
@@ -33,7 +33,7 @@
 it 'successfully performs a chat request' do
   expect(Llm::CompletionWorker).to receive(:perform_async).with(
     current_user.id, nil, nil, :chat, {
-      content: "summarize", markup_format: :raw, request_id: an_instance_of(String)
+      content: "summarize", markup_format: :raw, request_id: an_instance_of(String), skip_cache: false
     }
   )
@@ -45,7 +45,7 @@
 it 'successfully performs a request' do
   expect(Llm::CompletionWorker).to receive(:perform_async).with(
     current_user.id, resource.id, "Issue", :chat, {
-      content: "summarize", markup_format: :raw, request_id: an_instance_of(String)
+      content: "summarize", markup_format: :raw, request_id: an_instance_of(String), skip_cache: false
     }
   )
@@ -61,7 +61,7 @@
 it 'successfully performs a request' do
   expect(Llm::CompletionWorker).to receive(:perform_async).with(
     current_user.id, current_user.id, "User", :chat, {
-      content: "summarize", markup_format: :raw, request_id: an_instance_of(String)
+      content: "summarize", markup_format: :raw, request_id: an_instance_of(String), skip_cache: false
     }
   )
@@ -51,7 +51,7 @@
 allow(SecureRandom).to receive(:uuid).and_return(uuid)
 expect(Llm::CompletionWorker).to receive(:perform_async).with(
   current_user.id, project.id, "Project", :explain_code,
-  { markup_format: :raw, messages: messages, request_id: uuid }
+  { markup_format: :raw, messages: messages, request_id: uuid, skip_cache: true }
 )
 post_graphql_mutation(mutation, current_user: current_user)
@@ -47,6 +47,7 @@
   :fill_in_merge_request_template,
   {
     markup_format: :raw,
+    skip_cache: true,
     request_id: an_instance_of(String),
     source_project_id: project.id.to_s,
     source_branch: 'feature',
@@ -34,7 +34,7 @@
 it 'successfully performs an generate commit message request' do
   expect(Llm::CompletionWorker).to receive(:perform_async).with(
     current_user.id, merge_request.id, "MergeRequest", :generate_commit_message, {
-      markup_format: :raw, request_id: an_instance_of(String)
+      markup_format: :raw, request_id: an_instance_of(String), skip_cache: true
     }
   )
@@ -35,7 +35,7 @@
 it 'successfully performs an explain code request' do
   expect(Llm::CompletionWorker).to receive(:perform_async).with(
     current_user.id, merge_request.id, "MergeRequest", :generate_test_file, {
-      file_path: file_path, markup_format: :raw, request_id: an_instance_of(String)
+      file_path: file_path, markup_format: :raw, request_id: an_instance_of(String), skip_cache: true
     }
   )
@@ -11,7 +11,7 @@
 let(:resource) { issue }
 let(:stage_check_available) { true }
 let(:content) { "Summarize issue" }
-let(:options) { { content: content } }
+let(:options) { { content: content, skip_cache: false } }
 subject { described_class.new(user, resource, options) }
@@ -9,7 +9,7 @@
 let(:method) { :summarize_comments }
 let(:resource) { nil }
 let(:params) { {} }
-let(:options) { { request_id: 'uuid' }.merge(params) }
+let(:options) { { request_id: 'uuid', skip_cache: true }.merge(params) }
 subject { described_class.new(user, resource, method, options).execute }
@@ -19,7 +19,7 @@
 let(:options) { { 'key' => 'value' } }
 let(:ai_template) { { method: :completions, prompt: 'something', options: { temperature: 0.7 } } }
 let(:ai_action_name) { :summarize_comments }
-let(:params) { options.merge(request_id: 'uuid', internal_request: true) }
+let(:params) { options.merge(request_id: 'uuid', internal_request: true, skip_cache: true) }
 subject { described_class.new.perform(user_id, resource_id, resource_type, ai_action_name, params) }
@@ -29,7 +29,7 @@
 expect(Gitlab::Llm::CompletionsFactory)
   .to receive(:completion)
-  .with(ai_action_name, match({ internal_request: true, request_id: 'uuid' }))
+  .with(ai_action_name, match({ internal_request: true, request_id: 'uuid', skip_cache: true }))
   .and_return(completion)
 expect(completion)