Commit d2956f3

feat: implement AI-based comment evaluation in CommentService
- Added a new private method `evaluateCommentWithAI` to assess comment content using AI, supporting both scoring and spam detection modes.
- Integrated the `JsonOutputToolsParser` for handling AI responses.
- Updated the `checkSpam` method to utilize the new AI evaluation method, improving comment moderation capabilities.

Signed-off-by: Innei <tukon479@gmail.com>
1 parent 46704d2 commit d2956f3
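For context, the new behavior is driven by three fields on the blog's comment options. A minimal sketch of the shape this commit assumes; the field names are taken from the diff below, but the interface name here is hypothetical (the real option model is defined elsewhere in the repository):

```ts
// Sketch only: the fields of `commentOptions` that this commit reads.
interface CommentOptionsAiReview {
  /** master switch for AI-based review in checkSpam */
  aiReview: boolean
  /** 'score' selects the scoring branch; any other value selects spam detection */
  aiReviewType: 'score' | string
  /** comments whose risk score (1-10) exceeds this are flagged */
  aiReviewThreshold: number
}
```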

File tree

1 file changed (+131, -16 lines changed)


apps/core/src/modules/comment/comment.service.ts

Lines changed: 131 additions & 16 deletions
@@ -2,6 +2,7 @@ import { URL } from 'node:url'
 import { render } from 'ejs'
 import { omit, pick } from 'lodash'
 import { isObjectIdOrHexString, Types } from 'mongoose'
+import { pipe } from 'rxjs'
 import type { OnModuleInit } from '@nestjs/common'
 import type { ReturnModelType } from '@typegoose/typegoose/lib/types'
 import type { WriteBaseModel } from '~/shared/model/write-base.model'
@@ -11,6 +12,7 @@ import type {
   CommentModelRenderProps,
 } from './comment.email.default'
 
+import { JsonOutputToolsParser } from '@langchain/core/output_parsers/openai_tools'
 import {
   BadRequestException,
   forwardRef,
@@ -113,6 +115,130 @@ export class CommentService implements OnModuleInit {
     return this.databaseService.getModelByRefType(type) as any
   }
 
+  /**
+   * Evaluate comment content with AI
+   * @param text the comment text
+   * @param aiReviewType the review mode
+   * @param aiReviewThreshold the risk-score threshold
+   * @returns whether the comment should be flagged as spam
+   */
+  private async evaluateCommentWithAI(
+    text: string,
+    aiReviewType: 'score' | string,
+    aiReviewThreshold: number,
+  ): Promise<boolean> {
+    const runnable = await this.aiService.getOpenAiChain()
+
+    // Scoring mode
+    if (aiReviewType === 'score') {
+      const scorePrompt = {
+        content: `Analyze whether the following comment contains inappropriate content: ${text}\n\nEvaluate whether it contains spam, scams, ads, or toxic content, and assess its overall quality.`,
+        role: 'user',
+      }
+
+      try {
+        const response = await runnable
+          .bind({
+            tools: [
+              {
+                type: 'function',
+                function: {
+                  name: 'comment_review',
+                  description: 'Analyze the comment and return a risk score',
+                  parameters: {
+                    type: 'object',
+                    properties: {
+                      score: {
+                        type: 'number',
+                        description: 'Risk score, 1-10; higher means riskier',
+                      },
+                      hasSensitiveContent: {
+                        type: 'boolean',
+                        description: 'Whether it contains politically sensitive, pornographic, violent, or threatening content',
+                      },
+                    },
+                    required: ['score', 'hasSensitiveContent'],
+                  },
+                },
+              },
+            ],
+          })
+          .pipe(new JsonOutputToolsParser())
+          .invoke([scorePrompt])
+
+        if (!response) {
+          return false
+        }
+        const responseData = (response[0] as any)?.args
+
+        // Reject immediately if sensitive content is detected
+        if (responseData?.hasSensitiveContent) {
+          return true
+        }
+        // Otherwise decide by the risk score
+        return responseData?.score > aiReviewThreshold
+      } catch (error) {
+        this.logger.error('AI review failed in scoring mode', error)
+        return false
+      }
+    }
+    // Spam detection mode
+    else {
+      const spamPrompt = {
+        content: `Check whether the following comment is inappropriate: ${text}\n\nAnalyze whether it contains spam, ads, politically sensitive content, pornography, violence, or low-quality content.`,
+        role: 'user',
+      }
+
+      try {
+        const response = await runnable.invoke([spamPrompt], {
+          tools: [
+            {
+              type: 'function',
+              function: {
+                name: 'spam_check',
+                description: 'Check whether the comment is spam',
+                parameters: {
+                  type: 'object',
+                  properties: {
+                    isSpam: {
+                      type: 'boolean',
+                      description: 'Whether the comment is spam',
+                    },
+                    hasSensitiveContent: {
+                      type: 'boolean',
+                      description: 'Whether it contains politically sensitive, pornographic, violent, or threatening content',
+                    },
+                  },
+                  required: ['isSpam', 'hasSensitiveContent'],
+                },
+              },
+            },
+          ],
+        })
+
+        const content = response.content.toString()
+        // Extract the JSON portion of the reply
+        const jsonMatch = content.match(/\{.*\}/s)
+        if (!jsonMatch) {
+          this.logger.warn('AI review returned an unexpected format; could not parse JSON')
+          return false
+        }
+
+        const responseData = JSON.parse(jsonMatch[0])
+
+        // Reject immediately if sensitive content is detected
+        if (responseData.hasSensitiveContent) {
+          return true
+        }
+        // Otherwise follow the isSpam flag
+        return responseData.isSpam
+      } catch (error) {
+        this.logger.error('AI review failed in spam detection mode', error)
+        return false
+      }
+    }
+  }
+
   async checkSpam(doc: CommentModel) {
     const res = await (async () => {
       const commentOptions = await this.configs.get('commentOptions')
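A note on the parser used in the scoring branch: `JsonOutputToolsParser` (from `@langchain/core/output_parsers/openai_tools`) turns the model's tool calls into an array of `{ type, args }` objects, which is why the code reads `response[0]?.args`. A minimal standalone sketch, assuming an OpenAI-compatible chat model is configured; the model name and prompt are illustrative, not from this commit:

```ts
import { ChatOpenAI } from '@langchain/openai'
import { JsonOutputToolsParser } from '@langchain/core/output_parsers/openai_tools'

// Bind the same kind of function tool the commit uses, then parse the tool call.
const model = new ChatOpenAI({ model: 'gpt-4o-mini' }).bind({
  tools: [
    {
      type: 'function',
      function: {
        name: 'comment_review',
        description: 'Analyze the comment and return a risk score',
        parameters: {
          type: 'object',
          properties: {
            score: { type: 'number' },
            hasSensitiveContent: { type: 'boolean' },
          },
          required: ['score', 'hasSensitiveContent'],
        },
      },
    },
  ],
})

const parsed = await model
  .pipe(new JsonOutputToolsParser())
  .invoke('Rate this comment: Buy cheap pills now!!!')

// `parsed` is an array of tool calls, e.g.
// [{ type: 'comment_review', args: { score: 9, hasSensitiveContent: false } }]
console.log(parsed[0]?.args)
```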
@@ -150,22 +276,11 @@ export class CommentService implements OnModuleInit {
       }
 
       if (commentOptions.aiReview) {
-        const openai = await this.aiService.getOpenAiChain()
-        const { aiReviewType, aiReviewThreshold } = commentOptions
-        const runnable = openai
-
-        const prompt =
-          aiReviewType === 'score'
-            ? 'Check the comment and return a risk score directly. Higher means more risky (1-10). Outputs should only be a number'
-            : 'Check if the comment is spam or not. Outputs should be true or false(Lowercase)'
-
-        const result = (await runnable.invoke([`${prompt}:${doc.text}`]))
-          .content
-
-        if (aiReviewType === 'score') {
-          return (result as any) > aiReviewThreshold
-        }
-        return result === 'true'
+        return this.evaluateCommentWithAI(
+          doc.text,
+          commentOptions.aiReviewType,
+          commentOptions.aiReviewThreshold,
+        )
       }
       return false
     })()
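Taken together, the `aiReview` branch of `checkSpam` now delegates entirely to the new method, and every AI failure path returns `false`, so moderation fails open rather than blocking comments on a parser or network error. A sketch of the resulting call flow; the option values and the `commentService` handle are illustrative, not from this commit:

```ts
// Assuming commentOptions resolves to:
//   { aiReview: true, aiReviewType: 'score', aiReviewThreshold: 7 }
// checkSpam(doc) will:
//   1. read commentOptions from the config service
//   2. call this.evaluateCommentWithAI(doc.text, 'score', 7)
//   3. flag the comment when hasSensitiveContent is true or score > 7;
//      any AI error is logged and treated as "not spam"
const shouldBlock = await commentService.checkSpam(doc)
if (shouldBlock) {
  // e.g. mark the comment as junk instead of publishing it
}
```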
