Skip to content

Commit c8d7ea4

Browse files
committed
FEATURE: Use a persona when running the AI triage automation script
1 parent 14de547 commit c8d7ea4

File tree

9 files changed

+159
-97
lines changed

9 files changed

+159
-97
lines changed

plugins/discourse-ai/config/locales/client.en.yml

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -132,9 +132,9 @@ en:
132132
description: "In silent mode persona will receive the content but will not post anything on the forum - useful when performing triage using tools"
133133
llm_triage:
134134
fields:
135-
system_prompt:
136-
label: "System Prompt"
137-
description: "The prompt that will be used to triage, be sure for it to reply with a single word you can use to trigger the action"
135+
triage_persona:
136+
label: "Persona"
137+
description: "Persona used to triage, be sure for it to reply with a single word you can use to trigger the action"
138138
max_post_tokens:
139139
label: "Max Post Tokens"
140140
description: "The maximum number of tokens to scan using LLM triage"
@@ -174,12 +174,6 @@ en:
174174
reply_persona:
175175
label: "Reply Persona"
176176
description: "AI Persona to use for replies (must have default LLM), will be prioritized over canned reply"
177-
model:
178-
label: "Model"
179-
description: "Language model used for triage"
180-
temperature:
181-
label: "Temperature"
182-
description: "Temperature to use for the LLM. Increase to increase randomness (leave empty to use model default)"
183177
max_output_tokens:
184178
label: "Max output tokens"
185179
description: "When specified, sets an upper bound to the maximum number of tokens the model can generate. Respects LLM's max output tokens limit"
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
# frozen_string_literal: true

# Seeds an AiPersona for every existing `llm_triage` automation so those
# automations can migrate from the raw `system_prompt`/`model`/`temperature`
# fields to the new `triage_persona` field introduced by this feature.
class SeedPersonasFromTriageScripts < ActiveRecord::Migration[8.0]
  def up
    script_fields = DB.query <<~SQL
      SELECT fields.id, fields.name, (fields.metadata->>'value') AS value, automations.name AS automation_name, automations.id AS automation_id
      FROM discourse_automation_fields fields
      INNER JOIN discourse_automation_automations automations ON automations.id = fields.automation_id
      WHERE fields.name IN ('model', 'system_prompt', 'temperature')
      AND automations.script = 'llm_triage'
    SQL

    return if script_fields.empty?

    # Collapse the per-field rows into one hash per automation:
    # { automation_id => { "automation_id" => ..., "name" => ...,
    #   "model" => ..., "system_prompt" => ..., "temperature" => ... } }
    script_fields =
      script_fields.reduce({}) do |acc, field|
        id = field.automation_id
        acc[id] ||= { "automation_id" => id, "name" => field.automation_name }
        acc[id].merge!(field.name => field.value)
        acc
      end

    # Quote user-controlled strings so prompts/names containing apostrophes
    # cannot break (or inject into) the INSERT statements below.
    quote = ->(value) { ActiveRecord::Base.connection.quote(value) }

    # Maps automation_id => newly created persona id (nil when the
    # automation had no usable system prompt).
    automation_to_persona_ids =
      script_fields.transform_values do |field|
        next nil if field["system_prompt"].blank?

        name =
          if field["name"].present?
            "#{field["name"]} triage automation"
          else
            "Unnamed triage automation script ID #{field["automation_id"]}"
          end

        # Coerce temperature to a float and only keep numeric model ids;
        # blank/garbage values become SQL NULL instead of invalid tokens.
        temp = field["temperature"].present? ? field["temperature"].to_f : "NULL"
        model_id = field["model"].to_s.match?(/\A\d+\z/) ? field["model"] : "NULL"

        row =
          "#{quote.call(name)}, 'Seeded Persona for an LLM Triage script', FALSE, #{quote.call(field["system_prompt"])}, #{temp}, #{model_id}, NOW(), NOW()"

        DB.query_single(<<~SQL)&.first
          INSERT INTO ai_personas (name, description, enabled, system_prompt, temperature, default_llm_id, created_at, updated_at)
          VALUES (#{row})
          RETURNING id
        SQL
      end

    new_fields =
      automation_to_persona_ids
        .map do |automation_id, persona_id|
          if persona_id.blank?
            nil
          else
            "(#{automation_id}, 'triage_persona', json_build_object('value', #{persona_id}), 'choices', 'script', NOW(), NOW())"
          end
        end
        .compact

    # No automation had a usable system prompt: inserting an empty VALUES
    # list would be a SQL syntax error, so stop here.
    return if new_fields.empty?

    DB.exec <<~SQL
      INSERT INTO discourse_automation_fields (automation_id, name, metadata, component, target, created_at, updated_at)
      VALUES #{new_fields.join(",")}
    SQL
  end

  def down
    raise ActiveRecord::IrreversibleMigration
  end
end

plugins/discourse-ai/discourse_automation/llm_triage.rb

Lines changed: 9 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -13,17 +13,19 @@
1313
field :include_personal_messages, component: :boolean
1414

1515
# Inputs
16-
field :model,
16+
field :triage_persona,
1717
component: :choices,
1818
required: true,
1919
extra: {
20-
content: DiscourseAi::Automation.available_models,
20+
content:
21+
DiscourseAi::Automation.available_persona_choices(
22+
require_user: false,
23+
require_default_llm: true,
24+
),
2125
}
22-
field :system_prompt, component: :message, required: false
2326
field :search_for_text, component: :text, required: true
2427
field :max_post_tokens, component: :text
2528
field :stop_sequences, component: :text_list, required: false
26-
field :temperature, component: :text
2729
field :max_output_tokens, component: :text
2830

2931
# Actions
@@ -60,6 +62,8 @@
6062
next if !include_personal_messages
6163
end
6264

65+
triage_persona_id = fields.dig("triage_persona", "value")
66+
6367
canned_reply = fields.dig("canned_reply", "value")
6468
canned_reply_user = fields.dig("canned_reply_user", "value")
6569
reply_persona_id = fields.dig("reply_persona", "value")
@@ -69,22 +73,14 @@
6973
next if post.user.username == canned_reply_user
7074
next if post.raw.strip == canned_reply.to_s.strip
7175

72-
system_prompt = fields.dig("system_prompt", "value")
7376
search_for_text = fields.dig("search_for_text", "value")
74-
model = fields.dig("model", "value")
7577

7678
category_id = fields.dig("category", "value")
7779
tags = fields.dig("tags", "value")
7880
hide_topic = fields.dig("hide_topic", "value")
7981
flag_post = fields.dig("flag_post", "value")
8082
flag_type = fields.dig("flag_type", "value")
8183
max_post_tokens = fields.dig("max_post_tokens", "value").to_i
82-
temperature = fields.dig("temperature", "value")
83-
if temperature == "" || temperature.nil?
84-
temperature = nil
85-
else
86-
temperature = temperature.to_f
87-
end
8884

8985
max_output_tokens = fields.dig("max_output_tokens", "value").to_i
9086
max_output_tokens = nil if max_output_tokens <= 0
@@ -110,9 +106,8 @@
110106

111107
DiscourseAi::Automation::LlmTriage.handle(
112108
post: post,
113-
model: model,
109+
triage_persona_id: triage_persona_id,
114110
search_for_text: search_for_text,
115-
system_prompt: system_prompt,
116111
category_id: category_id,
117112
tags: tags,
118113
canned_reply: canned_reply,
@@ -125,7 +120,6 @@
125120
max_post_tokens: max_post_tokens,
126121
stop_sequences: stop_sequences,
127122
automation: self.automation,
128-
temperature: temperature,
129123
max_output_tokens: max_output_tokens,
130124
action: context["action"],
131125
)

plugins/discourse-ai/lib/ai_helper/assistant.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -312,7 +312,7 @@ def find_ai_helper_model(helper_mode, persona_klass)
312312

313313
# Priorities are:
314314
# 1. Persona's default LLM
315-
# 2. SiteSetting.ai_default_llm_id (or newest LLM if not set)
315+
# 2. SiteSetting.ai_default_llm_model (or newest LLM if not set)
316316
def self.find_ai_helper_model(helper_mode, persona_klass)
317317
model_id = persona_klass.default_llm_id || SiteSetting.ai_default_llm_model
318318

plugins/discourse-ai/lib/automation/llm_triage.rb

Lines changed: 38 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,8 @@ module Automation
55
module LlmTriage
66
def self.handle(
77
post:,
8-
model:,
8+
triage_persona_id:,
99
search_for_text:,
10-
system_prompt:,
1110
category_id: nil,
1211
tags: nil,
1312
canned_reply: nil,
@@ -18,7 +17,6 @@ def self.handle(
1817
automation: nil,
1918
max_post_tokens: nil,
2019
stop_sequences: nil,
21-
temperature: nil,
2220
whisper: nil,
2321
reply_persona_id: nil,
2422
max_output_tokens: nil,
@@ -34,42 +32,55 @@ def self.handle(
3432
return
3533
end
3634

37-
llm = DiscourseAi::Completions::Llm.proxy(model)
35+
triage_persona = AiPersona.find(triage_persona_id)
36+
model_id = triage_persona.default_llm_id || SiteSetting.ai_default_llm_model
37+
return if model_id.blank?
38+
model = LlmModel.find(model_id)
3839

39-
s_prompt = system_prompt.to_s.sub("%%POST%%", "") # Backwards-compat. We no longer sub this.
40-
prompt = DiscourseAi::Completions::Prompt.new(s_prompt)
40+
bot =
41+
DiscourseAi::Personas::Bot.as(
42+
Discourse.system_user,
43+
persona: triage_persona.class_instance.new,
44+
model: model,
45+
)
4146

42-
content = "title: #{post.topic.title}\n#{post.raw}"
47+
input = "title: #{post.topic.title}\n#{post.raw}"
4348

44-
content =
45-
llm.tokenizer.truncate(
46-
content,
49+
input =
50+
model.tokenizer_class.truncate(
51+
input,
4752
max_post_tokens,
4853
strict: SiteSetting.ai_strict_token_counting,
4954
) if max_post_tokens.present?
5055

5156
if post.upload_ids.present?
52-
content = [content]
53-
content.concat(post.upload_ids.map { |upload_id| { upload_id: upload_id } })
57+
input = [input]
58+
input.concat(post.upload_ids.map { |upload_id| { upload_id: upload_id } })
5459
end
5560

56-
prompt.push(type: :user, content: content)
61+
bot_ctx =
62+
DiscourseAi::Personas::BotContext.new(
63+
user: Discourse.system_user,
64+
skip_tool_details: true,
65+
feature_name: "llm_triage",
66+
messages: [{ type: :user, content: input }],
67+
)
5768

5869
result = nil
5970

60-
result =
61-
llm.generate(
62-
prompt,
63-
max_tokens: max_output_tokens,
64-
temperature: temperature,
65-
user: Discourse.system_user,
66-
stop_sequences: stop_sequences,
67-
feature_name: "llm_triage",
68-
feature_context: {
69-
automation_id: automation&.id,
70-
automation_name: automation&.name,
71-
},
72-
)&.strip
71+
llm_args = {
72+
max_tokens: max_output_tokens,
73+
stop_sequences: stop_sequences,
74+
feature_context: {
75+
automation_id: automation&.id,
76+
automation_name: automation&.name,
77+
},
78+
}
79+
80+
result = +""
81+
bot.reply(bot_ctx, llm_args: llm_args) do |partial, _, type|
82+
result << partial if type.blank?
83+
end
7384

7485
if result.present? && result.downcase.include?(search_for_text.downcase)
7586
user = User.find_by_username(canned_reply_user) if canned_reply_user.present?
@@ -92,6 +103,7 @@ def self.handle(
92103
end
93104
elsif canned_reply.present? && action != :edit
94105
post_type = whisper ? Post.types[:whisper] : Post.types[:regular]
106+
95107
PostCreator.create!(
96108
user,
97109
topic_id: post.topic_id,

plugins/discourse-ai/lib/embeddings/semantic_search.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,7 @@ def hypothetical_post_from(search_term)
201201

202202
# Priorities are:
203203
# 1. Persona's default LLM
204-
# 2. SiteSetting.ai_default_llm_id (or newest LLM if not set)
204+
# 2. SiteSetting.ai_default_llm_model (or newest LLM if not set)
205205
def find_ai_hyde_model(persona_klass)
206206
model_id = persona_klass.default_llm_id || SiteSetting.ai_default_llm_model
207207

plugins/discourse-ai/lib/summarization.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ def chat_channel_summary(channel, time_window_in_hours)
5454

5555
# Priorities are:
5656
# 1. Persona's default LLM
57-
# 2. SiteSetting.ai_default_llm_id (or newest LLM if not set)
57+
# 2. SiteSetting.ai_default_llm_model (or newest LLM if not set)
5858
def find_summarization_model(persona_klass)
5959
model_id = persona_klass.default_llm_id || SiteSetting.ai_default_llm_model
6060

plugins/discourse-ai/spec/lib/discourse_automation/llm_triage_spec.rb

Lines changed: 14 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
let(:automation) { Fabricate(:automation, script: "llm_triage", enabled: true) }
1212

1313
fab!(:llm_model)
14+
fab!(:ai_persona)
1415

1516
def add_automation_field(name, value, type: "text")
1617
automation.fields.create!(
@@ -27,9 +28,11 @@ def add_automation_field(name, value, type: "text")
2728
enable_current_plugin
2829

2930
SiteSetting.tagging_enabled = true
30-
add_automation_field("system_prompt", "hello %%POST%%")
31+
32+
ai_persona.update!(default_llm: llm_model)
33+
34+
add_automation_field("triage_persona", ai_persona.id)
3135
add_automation_field("search_for_text", "bad")
32-
add_automation_field("model", llm_model.id)
3336
add_automation_field("category", category.id, type: "category")
3437
add_automation_field("tags", %w[aaa bbb], type: "tags")
3538
add_automation_field("hide_topic", true, type: "boolean")
@@ -42,21 +45,17 @@ def add_automation_field(name, value, type: "text")
4245
it "can trigger via automation" do
4346
post = Fabricate(:post, raw: "hello " * 5000)
4447

45-
body = {
46-
model: "gpt-3.5-turbo-0301",
47-
usage: {
48-
prompt_tokens: 337,
49-
completion_tokens: 162,
50-
total_tokens: 499,
51-
},
52-
choices: [
53-
{ message: { role: "assistant", content: "bad" }, finish_reason: "stop", index: 0 },
54-
],
55-
}.to_json
48+
chunks = <<~RESPONSE
49+
data: {"id":"chatcmpl-B2VwlY6KzSDtHvg8pN1VAfRhhLFgn","object":"chat.completion.chunk","created":1739939159,"model": "gpt-3.5-turbo-0301","service_tier":"default","system_fingerprint":"fp_ef58bd3122","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"finish_reason":null}],"usage":null}
50+
51+
data: {"id":"chatcmpl-B2VwlY6KzSDtHvg8pN1VAfRhhLFgn","object":"chat.completion.chunk","created":1739939159,"model": "gpt-3.5-turbo-0301","service_tier":"default","system_fingerprint":"fp_ef58bd3122","choices":[{"index":0,"delta":{"content":"bad"},"finish_reason":null}],"usage":null}
52+
53+
data: [DONE]
54+
RESPONSE
5655

5756
WebMock.stub_request(:post, "https://api.openai.com/v1/chat/completions").to_return(
5857
status: 200,
59-
body: body,
58+
body: chunks,
6059
)
6160

6261
automation.running_in_background!
@@ -96,7 +95,7 @@ def add_automation_field(name, value, type: "text")
9695
# PM
9796
reply_user.update!(admin: true)
9897
add_automation_field("include_personal_messages", true, type: :boolean)
99-
add_automation_field("temperature", "0.2")
98+
ai_persona.update!(temperature: 0.2)
10099
add_automation_field("max_output_tokens", "700")
101100
post = Fabricate(:post, topic: personal_message)
102101

0 commit comments

Comments
 (0)