config/settings.yml (9 changes: 6 additions & 3 deletions)
@@ -6,8 +6,9 @@ discourse_ai:
   ai_toxicity_enabled:
     default: false
     client: true
+    hidden: true
   ai_toxicity_inference_service_api_endpoint:
-    default: "https://disorder-testing.demo-by-discourse.com"
+    default: ""
   ai_toxicity_inference_service_api_endpoint_srv:
     default: ""
     hidden: true
@@ -72,9 +73,11 @@ discourse_ai:
       - sentiment
       - emotion
 
-  ai_nsfw_detection_enabled: false
+  ai_nsfw_detection_enabled:
+    default: false
+    hidden: true
   ai_nsfw_inference_service_api_endpoint:
-    default: "https://nsfw-testing.demo-by-discourse.com"
+    default: ""
   ai_nsfw_inference_service_api_endpoint_srv:
     default: ""
     hidden: true
@@ -7,6 +7,7 @@
   before do
     SiteSetting.ai_toxicity_enabled = true
     SiteSetting.ai_toxicity_flag_automatically = true
+    SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
   end
 
   fab!(:chat_message)
@@ -8,6 +8,7 @@
   before do
     SiteSetting.ai_toxicity_enabled = true
     SiteSetting.ai_toxicity_flag_automatically = true
+    SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
   end
 
   fab!(:post)
spec/lib/modules/toxicity/toxicity_classification_spec.rb (2 changes: 2 additions & 0 deletions)
@@ -5,6 +5,8 @@
 describe DiscourseAi::Toxicity::ToxicityClassification do
   fab!(:target) { Fabricate(:post) }
 
+  before { SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com" }
+
   describe "#request" do
     it "returns the classification and the model used for it" do
       ToxicityInferenceStubs.stub_post_classification(target, toxic: false)
spec/plugin_spec.rb (5 changes: 4 additions & 1 deletion)
@@ -3,7 +3,10 @@
 require_relative "support/toxicity_inference_stubs"
 
 describe Plugin::Instance do
-  before { SiteSetting.discourse_ai_enabled = true }
+  before do
+    SiteSetting.discourse_ai_enabled = true
+    SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
+  end
 
   describe "on reviewable_transitioned_to event" do
     fab!(:post)
spec/shared/chat_message_classificator_spec.rb (2 changes: 2 additions & 0 deletions)
@@ -8,6 +8,8 @@
   let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
   let(:classification) { described_class.new(model) }
 
+  before { SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com" }
+
   describe "#classify!" do
     before { ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true) }
 
spec/shared/post_classificator_spec.rb (2 changes: 2 additions & 0 deletions)
@@ -8,6 +8,8 @@
   let(:model) { DiscourseAi::Toxicity::ToxicityClassification.new }
   let(:classification) { described_class.new(model) }
 
+  before { SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com" }
+
   describe "#classify!" do
     before { ToxicityInferenceStubs.stub_post_classification(post, toxic: true) }
 
spec/system/toxicity/reviewable_ai_chat_message_spec.rb (1 change: 1 addition & 0 deletions)
@@ -10,6 +10,7 @@
     sign_in(admin)
     SiteSetting.ai_toxicity_enabled = true
     SiteSetting.ai_toxicity_flag_automatically = true
+    SiteSetting.ai_toxicity_inference_service_api_endpoint = "http://example.com"
 
     ToxicityInferenceStubs.stub_chat_message_classification(chat_message, toxic: true)
 