
Use Claude 3 Sonnet for Duo Chat Zero Shot

Merged: Jessie Young requested to merge jy-claude-3-for-real-this-time into master
4 files changed: +44 −39
@@ -12,12 +12,9 @@ class Anthropic < Base
   def self.prompt(options)
     if Feature.enabled?(:ai_claude_3_sonnet, options.fetch(:current_user))
-      prompt = [
-        Utils::Prompt.default_system_prompt_json
-        # Utils::Prompt.as_user_json(base_prompt(options))
-      ] + ::Gitlab::Json.parse(base_prompt(options))
-
-      Requests::Anthropic.prompt(prompt)
+      history = truncated_conversation_list(options[:conversation])
+
+      text = history + base_prompt(options)
     else
       human_role = ROLE_NAMES[Llm::AiMessage::ROLE_USER]
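For orientation between the two hunks: the legacy branch builds one text-completion string with role markers, while the Claude 3 branch builds a Messages-style list of role/content hashes, which is why `history + base_prompt(options)` can be plain array concatenation. A minimal Ruby sketch with invented contents, assuming `base_prompt` returns entries in the same hash shape (this diff does not show its return value):

    # Legacy text-completion path: one string, with history prepended as text.
    legacy_prompt = "\n\nHuman: Why did this pipeline fail?\n\nAssistant:"

    # Claude 3 Messages path: prior turns plus the new base prompt, as an
    # array of role/content hashes joined with plain Array#+.
    history = [
      { role: :user,      content: 'Why did this pipeline fail?' },
      { role: :assistant, content: 'The rspec job timed out.' }
    ]
    base_prompt    = [{ role: :user, content: 'How do I raise the timeout?' }]
    claude3_prompt = history + base_prompt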
@@ -25,28 +22,23 @@ def self.prompt(options)
         \n\n#{human_role}: #{base_prompt(options)}
       PROMPT

-      history = truncated_conversation(options[:conversation], Requests::Anthropic::PROMPT_SIZE - text.size)
+      history = truncated_conversation(
+        options[:conversation],
+        Requests::Anthropic::PROMPT_SIZE - text.size
+      )
       text = [history, text].join if history.present?
-
-      Requests::Anthropic.prompt(text)
     end
+
+    Requests::Anthropic.prompt(text)
   end

   traceable :prompt, name: 'Build prompt', run_type: 'prompt', class_method: true

-  def self.truncated_conversation_list(_conversaton, limit)
+  def self.truncated_conversation_list(conversation)
     return [] if conversation.blank?

-    conversation.reverse_each.map do |result, message|
-      role = ROLE_NAMES[message.role]
-      buffer = { role: role, content: message.content }
-      break result if buffer.size + result.size > limit
-      next result unless message.role == Llm::AiMessage::ROLE_USER
-      new_str = "#{buffer}#{result}" # Reset the buffer for the next conversation block
-      new_str
+    conversation.map do |message, _|
+      { role: message.role.to_sym, content: message.content }
     end
   end
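Despite keeping "truncated" in its name, the rewritten helper no longer truncates anything: it maps every stored message straight to a role/content hash, dropping the character budget (`Requests::Anthropic::PROMPT_SIZE - text.size`) that the legacy string path still applies. A runnable sketch under stated assumptions: `Message` is a hypothetical stand-in for GitLab's message objects, a plain nil/empty check replaces Rails' `blank?`, and the real method destructures `|message, _|` pairs where this sketch takes bare messages:

    # Stand-in for the app's message objects (hypothetical, not GitLab's class).
    Message = Struct.new(:role, :content)

    def truncated_conversation_list(conversation)
      # Plain-Ruby equivalent of the `return [] if conversation.blank?` guard.
      return [] if conversation.nil? || conversation.empty?

      # Every turn is kept; no size budget is applied.
      conversation.map do |message|
        { role: message.role.to_sym, content: message.content }
      end
    end

    chat = [
      Message.new('user', 'What is GitLab CI?'),
      Message.new('assistant', 'A built-in CI/CD system.')
    ]

    p truncated_conversation_list(chat)
    # => [{role: :user, content: "What is GitLab CI?"},
    #     {role: :assistant, content: "A built-in CI/CD system."}]

By contrast, judging from the removed code, the legacy `truncated_conversation(conversation, limit)` walks the history newest-first and stops once adding another turn would exceed the limit, so with this change any size limiting for the Claude 3 path has to happen elsewhere.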