@@ -2,6 +2,7 @@ import { URL } from 'node:url'
 import { render } from 'ejs'
 import { omit, pick } from 'lodash'
 import { isObjectIdOrHexString, Types } from 'mongoose'
+import { pipe } from 'rxjs'
 import type { OnModuleInit } from '@nestjs/common'
 import type { ReturnModelType } from '@typegoose/typegoose/lib/types'
 import type { WriteBaseModel } from '~/shared/model/write-base.model'
@@ -11,6 +12,7 @@ import type {
   CommentModelRenderProps,
 } from './comment.email.default'
 
+import { JsonOutputToolsParser } from '@langchain/core/output_parsers/openai_tools'
 import {
   BadRequestException,
   forwardRef,
@@ -113,6 +115,130 @@ export class CommentService implements OnModuleInit {
     return this.databaseService.getModelByRefType(type) as any
   }
 
+  /**
+   * Evaluate comment content with AI.
+   * @param text the comment text
+   * @param aiReviewType the review mode
+   * @param aiReviewThreshold the score threshold
+   * @returns whether the comment should be flagged as spam
+   */
+  private async evaluateCommentWithAI(
+    text: string,
+    aiReviewType: 'score' | string,
+    aiReviewThreshold: number,
+  ): Promise<boolean> {
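+    // Assumes getOpenAiChain() yields a LangChain chat-model Runnable that
+    // supports binding OpenAI function-calling tools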
+    const runnable = await this.aiService.getOpenAiChain()
+
+    // Score mode
+    if (aiReviewType === 'score') {
+      const scorePrompt = {
+        content: `Analyze whether the following comment contains inappropriate content: ${text}\n\nAssess it for spam, scams, advertising, toxic content, and overall quality.`,
+        role: 'user',
+      }
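+
+      // Bind an OpenAI function-calling tool so the model replies with
+      // structured { score, hasSensitiveContent } arguments rather than prose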
+      try {
+        const response = await runnable
+          .bind({
+            tools: [
+              {
+                type: 'function',
+                function: {
+                  name: 'comment_review',
+                  description: 'Analyze the comment content and give a risk score',
+                  parameters: {
+                    type: 'object',
+                    properties: {
+                      score: {
+                        type: 'number',
+                        description: 'Risk score, 1-10; higher means more dangerous',
+                      },
+                      hasSensitiveContent: {
+                        type: 'boolean',
+                        description:
+                          'Whether it contains politically sensitive, pornographic, violent, or threatening content',
+                      },
+                    },
+                    required: ['score', 'hasSensitiveContent'],
+                  },
+                },
+              },
+            ],
+          })
+          .pipe(new JsonOutputToolsParser())
+          .invoke([scorePrompt])
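+
+        // JsonOutputToolsParser yields the parsed tool calls as an array of
+        // { type, args } objects, so read the first call's arguments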
+        if (!response) {
+          return false
+        }
+        const responseData = (response[0] as any)?.args
+        if (!responseData) {
+          return false
+        }
+
+        // Reject immediately if sensitive content is present
+        if (responseData.hasSensitiveContent) {
+          return true
+        }
+        // Otherwise decide by the score
+        return responseData.score > aiReviewThreshold
+      } catch (error) {
+        this.logger.error('AI review (score mode) errored', error)
+        return false
+      }
+    }
+    // Spam-detection mode
+    else {
+      const spamPrompt = {
+        content: `Check whether the following comment is inappropriate: ${text}\n\nAnalyze it for spam, advertising, politically sensitive content, pornography, violence, or low-quality content.`,
+        role: 'user',
+      }
+
+      try {
+        const response = await runnable.invoke([spamPrompt], {
+          tools: [
+            {
+              type: 'function',
+              function: {
+                name: 'spam_check',
+                description: 'Check whether the comment is spam',
+                parameters: {
+                  type: 'object',
+                  properties: {
+                    isSpam: {
+                      type: 'boolean',
+                      description: 'Whether the comment is spam',
+                    },
+                    hasSensitiveContent: {
+                      type: 'boolean',
+                      description:
+                        'Whether it contains politically sensitive, pornographic, violent, or threatening content',
+                    },
+                  },
+                  required: ['isSpam', 'hasSensitiveContent'],
+                },
+              },
+            },
+          ],
+        })
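+
+        // Without an explicit tool_choice the model may answer either via a
+        // tool call (empty `content`, arguments on `tool_calls`) or as plain
+        // text, so check the tool call first and fall back to parsing content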
+        const toolArgs = (response as any).tool_calls?.[0]?.args
+        const content = response.content.toString()
+        // Extract the JSON portion from a plain-text reply
+        const jsonMatch = content.match(/\{.*\}/s)
+
+        const responseData =
+          toolArgs ?? (jsonMatch ? JSON.parse(jsonMatch[0]) : null)
+        if (!responseData) {
+          this.logger.warn('AI review response was malformed; unable to parse JSON')
+          return false
+        }
+
+        // Reject immediately if sensitive content is present
+        if (responseData.hasSensitiveContent) {
+          return true
+        }
+        // Otherwise decide by the spam flag
+        return responseData.isSpam
+      } catch (error) {
+        this.logger.error('AI review (spam-check mode) errored', error)
+        return false
+      }
+    }
+  }
+
   async checkSpam(doc: CommentModel) {
     const res = await (async () => {
       const commentOptions = await this.configs.get('commentOptions')
@@ -150,22 +276,11 @@ export class CommentService implements OnModuleInit {
       }
 
       if (commentOptions.aiReview) {
-        const openai = await this.aiService.getOpenAiChain()
-        const { aiReviewType, aiReviewThreshold } = commentOptions
-        const runnable = openai
-
-        const prompt =
-          aiReviewType === 'score'
-            ? 'Check the comment and return a risk score directly. Higher means more risky (1-10). Outputs should only be a number'
-            : 'Check if the comment is spam or not. Outputs should be true or false(Lowercase)'
-
-        const result = (await runnable.invoke([`${prompt}:${doc.text}`]))
-          .content
-
-        if (aiReviewType === 'score') {
-          return (result as any) > aiReviewThreshold
-        }
-        return result === 'true'
+        return this.evaluateCommentWithAI(
+          doc.text,
+          commentOptions.aiReviewType,
+          commentOptions.aiReviewThreshold,
+        )
       }
       return false
     })()