Nanny7 committed on
Commit
da53044
·
1 Parent(s): 7852aa8

Implement real AI-powered content audit with Hanyu Xinxie style cards

Browse files
Files changed (2) hide show
  1. app.py +371 -143
  2. requirements.txt +1 -0
app.py CHANGED
@@ -1,151 +1,381 @@
1
  """
2
- ContentGuardian - Content Audit Agent
3
- China · Simplified Chinese · Hanyu Xinxie Style · Text-based Version
4
- Optimized for Hugging Face Spaces
5
  """
6
  import gradio as gr
 
 
 
 
7
  import datetime
 
8
 
9
- def comprehensive_text_audit(text, keywords=""):
10
- """
11
- Comprehensive text audit - structured text output
12
- """
13
- if not text.strip():
14
- return "❌ Please enter text content"
15
 
16
- # Parse keywords
17
- keyword_list = [k.strip() for k in keywords.split(",") if k.strip()] if keywords else []
18
-
19
- # 1. Inappropriate advertising language detection
20
- inappropriate_words = [
21
- "绝对", "完全", "100%", "永远", "从不", "必须", "一定",
22
- "第一", "最好", "最佳", "最优", "最强", "最大", "最小",
23
- "极度", "超级", "顶级", "王牌", "冠军", "霸主",
24
- "立即", "马上", "瞬间", "秒杀", "急速", "飞速",
25
- "神奇", "奇迹", "万能", "无敌", "完美", "终极",
26
- "absolutely", "completely", "perfect", "ultimate", "best", "first",
27
- "immediately", "instantly", "magical", "miracle", "supreme"
28
- ]
29
-
30
- found_inappropriate = []
31
- for word in inappropriate_words:
32
- if word.lower() in text.lower():
33
- positions = []
34
- start = 0
35
- text_lower = text.lower()
36
- word_lower = word.lower()
37
- while True:
38
- pos = text_lower.find(word_lower, start)
39
- if pos == -1:
40
- break
41
- positions.append(pos)
42
- start = pos + 1
43
- if positions:
44
- found_inappropriate.append({
45
- "word": word,
46
- "count": len(positions),
47
- "positions": positions
48
- })
49
-
50
- # 2. Keyword marking
51
- found_keywords = []
52
- for keyword in keyword_list:
53
- if keyword.lower() in text.lower():
54
- positions = []
55
- start = 0
56
- text_lower = text.lower()
57
- keyword_lower = keyword.lower()
58
- while True:
59
- pos = text_lower.find(keyword_lower, start)
60
- if pos == -1:
61
- break
62
- positions.append(pos)
63
- start = pos + 1
64
- if positions:
65
- found_keywords.append({
66
- "keyword": keyword,
67
- "count": len(positions),
68
- "positions": positions
69
- })
70
-
71
- # 3. Generate structured report
72
- total_issues = len(found_inappropriate)
73
- severity = "低" if total_issues < 3 else "中" if total_issues < 6 else "高"
74
-
75
- # Build standardized text report
76
- report = f"""
77
- ╔══════════════════════════════════════════════════════════════╗
78
- ║ 📋 内容审核报告 ║
79
- ║ 中国·简体中文·汉语新解风格 ║
80
- ╚══════════════════════════════════════════════════════════════╝
81
 
82
- 📊 审核概览
83
- ├─ 总问题数:{total_issues}
84
- ├─ 严重程度:{severity}
85
- ├─ 文本长度:{len(text)} 字符
86
- └─ 审核时间:{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
87
 
88
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
 
 
 
 
 
89
 
90
- 🚫 不当广告用语检测 ({len(found_inappropriate)} 处)
91
- """
92
-
93
- if found_inappropriate:
94
- for i, item in enumerate(found_inappropriate, 1):
95
- report += f"""
96
- {i}. 词汇:"{item['word']}"
97
- └─ 出现次数:{item['count']} 次
98
- └─ 位置:{', '.join(map(str, item['positions']))}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99
  """
100
- else:
101
- report += "\n ✅ 未发现不当广告用语\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
- report += f"""
104
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
- 🔍 关键词标记 ({len(found_keywords)} 处)
107
- """
 
 
 
 
 
 
108
 
109
- if found_keywords:
110
- for i, item in enumerate(found_keywords, 1):
111
- report += f"""
112
- {i}. 关键词:"{item['keyword']}"
113
- └─ 出现次数:{item['count']}
114
- └─ 位置:{', '.join(map(str, item['positions']))}
115
- """
116
- else:
117
- report += "\n ℹ️ 未指定关键词或关键词未出现\n"
 
118
 
119
- report += f"""
120
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
121
-
122
- 📋 详细分析
123
- ├─ 合规建议:
124
- """
 
125
 
126
- if found_inappropriate:
127
- report += f"""│ • 建议修改或删除 {len(found_inappropriate)} 个不当用词
128
- │ • 使用相对性表述替代绝对化用词
129
- │ • 避免夸大宣传和误导性表述
130
- """
131
- else:
132
- report += "│ • 内容表述规范,符合广告法要求\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
 
134
- report += f"""├─ 风险等级:{severity}
135
- └─ 处理建议:{"需要重点关注和修改" if severity == "" else "建议适当调整" if severity == "中" else "可以正常使用"}
136
-
137
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
138
 
139
- 🎨 设计理念
140
- 本审核系统融合中华文化与现代设计,体现内敛典雅之美。
141
- 采用汉语新解风格,注重文字的准确性和表达的规范性。
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
- ╔══════════════════════════════════════════════════════════════╗
144
- ║ 审核完成 - 感谢使用 ContentGuardian 内容审核智能体 ���
145
- ╚══════════════════════════════════════════════════════════════╝
146
- """
 
 
 
 
 
147
 
148
- return report
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
 
150
  # Create Gradio interface optimized for HF Spaces
151
  demo = gr.Interface(
@@ -162,32 +392,30 @@ demo = gr.Interface(
162
  lines=2
163
  )
164
  ],
165
- outputs=gr.Textbox(
166
- label="Audit Report",
167
- lines=25,
168
- max_lines=30
169
  ),
170
- title="🛡️ ContentGuardian - Content Audit Agent",
171
  description="""
172
- **China · Simplified Chinese · Hanyu Xinxie Style · Text-based Version**
173
 
174
- This system uses standardized structured text output, integrating Chinese culture with modern design concepts.
175
- Detects inappropriate advertising language, marks specified keywords, and generates detailed audit reports.
176
 
177
  **Key Features:**
178
- - 🚫 Inappropriate Advertising Language Detection (32+ common violation words)
179
- - 🔍 Precise Keyword Marking
180
- - 📊 Structured Audit Reports
181
  - 🎨 Hanyu Xinxie Aesthetic Style
182
  """,
183
  examples=[
184
  ["This product is absolutely effective, completely side-effect free, the first brand! Buy immediately, instant results!", "product,effect"],
185
  ["Our product quality is very good, trustworthy, welcome to purchase.", "product,quality"],
186
- ["Buy now, instant effect, 100% effective, absolutely satisfying!", "buy,effect"],
187
- ["这款产品效果绝对好,完全无副作用,第一品牌!立即购买,马上见效!", "产品,效果"]
188
  ],
189
  theme=gr.themes.Soft(),
190
- allow_flagging="never"
191
  )
192
 
193
  if __name__ == "__main__":
 
1
  """
2
+ ContentGuardian - AI-Powered Content Audit Agent
3
+ China · Simplified Chinese · Hanyu Xinxie Style · AI-Driven Version
4
+ Real AI content analysis with SVG visualization
5
  """
6
import asyncio
import datetime
import json
import os
from typing import Dict, List, Optional

import aiohttp
import gradio as gr
13
 
14
# AI Client for content analysis
class AIContentAnalyzer:
    """Client for AI-powered content auditing via the SiliconFlow chat API.

    ``analyze_content()`` asks the model to audit Chinese marketing copy and
    return structured JSON.  Whenever the HTTP call, the response status, or
    the JSON parsing fails, ``_create_fallback_analysis()`` produces the same
    result schema from a local keyword scan, so callers always receive a
    uniform dict and never see an exception.
    """

    def __init__(self):
        # Placeholder default key: real requests made with it fail auth,
        # which routes the caller to the local fallback analysis.
        self.api_key = os.getenv("SILICONFLOW_API_KEY", "sk-your-api-key-here")
        self.base_url = "https://api.siliconflow.cn/v1/chat/completions"
        self.model = "Qwen/Qwen2.5-7B-Instruct"

    async def analyze_content(self, text: str, keywords: Optional[List[str]] = None) -> Dict:
        """AI-powered content analysis.

        Args:
            text: the text to audit.
            keywords: optional keywords whose usage should be analysed.

        Returns:
            Dict with keys ``typos``, ``violations``, ``fake_content``,
            ``inappropriate_ads``, ``keywords_analysis`` and
            ``overall_assessment``.  Never raises: every failure mode falls
            back to :meth:`_create_fallback_analysis`.
        """
        keywords_str = ", ".join(keywords) if keywords else "无"

        prompt = f"""
你是专业的内容审核专家,请对以下文本进行全面审核分析:

文本内容:{text}
关键词:{keywords_str}

请从以下维度进行分析:
1. 错别字检测:识别文本中的拼写错误和用词不当
2. 违规内容:检测不当言论、违法违规表述
3. 虚假内容:识别可能的虚假信息和夸大宣传
4. 不当广告用语:检测违反广告法的用词
5. 关键词分析:分析指定关键词的使用情况

请以JSON格式返回分析结果:
{{
    "typos": [
        {{"original": "错误词", "correct": "正确词", "reason": "错误原因"}}
    ],
    "violations": [
        {{"content": "违规内容", "type": "违规类型", "suggestion": "修改建议"}}
    ],
    "fake_content": [
        {{"content": "虚假内容", "type": "虚假类型", "suggestion": "修改建议"}}
    ],
    "inappropriate_ads": [
        {{"word": "不当用词", "reason": "违规原因", "suggestion": "替代建议"}}
    ],
    "keywords_analysis": [
        {{"keyword": "关键词", "frequency": 次数, "context": "使用语境", "assessment": "使用评价"}}
    ],
    "overall_assessment": {{
        "risk_level": "低/中/高",
        "total_issues": 问题总数,
        "main_concerns": ["主要问题1", "主要问题2"],
        "recommendations": ["建议1", "建议2"]
    }}
}}
"""

        try:
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }

            data = {
                "model": self.model,
                "messages": [
                    {"role": "system", "content": "你是专业的内容审核专家,擅长识别各类内容问题并提供专业建议。"},
                    {"role": "user", "content": prompt}
                ],
                # Low temperature: we want deterministic, parseable JSON back.
                "temperature": 0.1,
                "max_tokens": 2000
            }

            # Fix: bound the request so a stalled API cannot hang the Gradio
            # worker indefinitely (the original session had no timeout).
            timeout = aiohttp.ClientTimeout(total=60)
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.post(self.base_url, headers=headers, json=data) as response:
                    if response.status == 200:
                        result = await response.json()
                        ai_response = result["choices"][0]["message"]["content"]

                        # The model often wraps its JSON in markdown code
                        # fences; strip them before parsing.
                        try:
                            if "```json" in ai_response:
                                ai_response = ai_response.split("```json")[1].split("```")[0]
                            elif "```" in ai_response:
                                ai_response = ai_response.split("```")[1].split("```")[0]

                            return json.loads(ai_response.strip())
                        except json.JSONDecodeError:
                            # Unparseable model output -> local fallback.
                            return self._create_fallback_analysis(text, keywords)
                    else:
                        # Non-200 (bad key, rate limit, outage) -> local fallback.
                        return self._create_fallback_analysis(text, keywords)

        except Exception as e:
            # Network/DNS/timeout errors: log and degrade gracefully.
            print(f"AI analysis error: {e}")
            return self._create_fallback_analysis(text, keywords)

    def _create_fallback_analysis(self, text: str, keywords: Optional[List[str]] = None) -> Dict:
        """Local, offline fallback: substring scan for common banned ad words.

        Returns the same schema as :meth:`analyze_content` so the renderer
        does not need to know which path produced the result.
        """
        inappropriate_words = ["绝对", "完全", "100%", "第一", "最好", "立即", "马上"]
        found_inappropriate = [word for word in inappropriate_words if word in text]

        return {
            "typos": [],
            "violations": [],
            "fake_content": [],
            "inappropriate_ads": [{"word": word, "reason": "可能违反广告法", "suggestion": "使用相对性表述"} for word in found_inappropriate],
            "keywords_analysis": [{"keyword": kw, "frequency": text.count(kw), "context": "文本中", "assessment": "正常使用"} for kw in (keywords or []) if kw in text],
            "overall_assessment": {
                "risk_level": "中" if found_inappropriate else "低",
                "total_issues": len(found_inappropriate),
                "main_concerns": ["广告用语不当"] if found_inappropriate else [],
                "recommendations": ["修改绝对化表述"] if found_inappropriate else ["内容表述规范"]
            }
        }
125
 
126
# Module-level singleton analyzer, shared by every Gradio request.
ai_analyzer = AIContentAnalyzer()
128
+
129
def generate_hanyu_xinxie_cards(analysis_result: Dict, original_text: str) -> str:
    """Render the analysis dict as a "Hanyu Xinxie" style inline-styled HTML card.

    Args:
        analysis_result: dict in the AIContentAnalyzer result schema
            (keys: typos, violations, fake_content, inappropriate_ads,
            keywords_analysis, overall_assessment).
        original_text: the audited text; not referenced in this function —
            NOTE(review): apparently kept for interface stability, confirm
            before removing.

    Returns:
        str: an HTML fragment suitable for display via ``gr.HTML``.
    """
    overall = analysis_result.get("overall_assessment", {})
    total_issues = overall.get("total_issues", 0)
    risk_level = overall.get("risk_level", "低")

    # Morandi (muted) base palette
    colors = {
        "primary": "#B6B5A7",     # Morandi grey-brown
        "secondary": "#9A8F8F",   # Morandi grey-taupe
        "accent": "#C5B4A0",      # Morandi light tan
        "background": "#F2EDE9",  # Morandi light beige
        "text": "#5B5B5B",        # Morandi dark grey
        "light_text": "#8C8C8C",  # Morandi mid grey
        "divider": "#D1CBC3"      # Morandi light grey
    }

    # Shift header/accent colours toward red or orange as risk rises
    if risk_level == "高":
        colors["secondary"] = "#B85450"  # deep red
        colors["accent"] = "#D4776B"     # light red
    elif risk_level == "中":
        colors["secondary"] = "#C4965A"  # orange
        colors["accent"] = "#D4A574"     # light orange

    # Main summary card: title banner, headline stats, then per-category bullets
    main_card = f"""
    <div style="
        display: flex;
        justify-content: center;
        padding: 20px;
        background: linear-gradient(135deg, #E8E3DE 0%, #F2EDE9 100%);
        border-radius: 15px;
        font-family: 'Microsoft YaHei', 'Noto Sans SC', sans-serif;
        margin-bottom: 20px;
    ">
        <div style="
            width: 350px;
            background-color: {colors['background']};
            border-radius: 20px;
            box-shadow: 0 20px 40px rgba(0,0,0,0.1);
            overflow: hidden;
            position: relative;
        ">
            <!-- 标题区域 -->
            <div style="
                background-color: {colors['secondary']};
                color: {colors['background']};
                padding: 20px;
                text-align: left;
            ">
                <h1 style="font-size: 20px; margin: 0; font-weight: 700;">🛡️ AI内容审核报告</h1>
                <p style="font-size: 12px; margin: 5px 0 0 0; opacity: 0.9;">中国·简体中文·汉语新解风格</p>
            </div>

            <!-- 内容区域 -->
            <div style="padding: 25px 20px;">
                <!-- 主要统计 -->
                <div style="text-align: left; margin-bottom: 20px;">
                    <div style="
                        font-size: 36px;
                        color: {colors['text']};
                        margin-bottom: 8px;
                        font-weight: bold;
                        position: relative;
                    ">
                        {total_issues} 处问题
                        <div style="
                            position: absolute;
                            left: 0;
                            bottom: -4px;
                            width: 60px;
                            height: 3px;
                            background-color: {colors['accent']};
                        "></div>
                    </div>
                    <div style="
                        font-size: 16px;
                        color: {colors['light_text']};
                        margin: 12px 0;
                    ">风险等级: {risk_level}</div>
                </div>

                <!-- 分隔线 -->
                <div style="
                    width: 100%;
                    height: 1px;
                    background-color: {colors['divider']};
                    margin: 20px 0;
                "></div>

                <!-- AI分析结果 -->
                <div style="
                    font-size: 14px;
                    line-height: 1.6;
                    text-align: left;
                ">
                    <div style="
                        padding-left: 15px;
                        border-left: 3px solid {colors['accent']};
                    ">"""

    # One bullet line per non-empty finding category
    if analysis_result.get("typos"):
        main_card += f'<p style="margin: 8px 0; color: {colors["text"]};">• 错别字问题: {len(analysis_result["typos"])} 处</p>'

    if analysis_result.get("violations"):
        main_card += f'<p style="margin: 8px 0; color: {colors["text"]};">• 违规内容: {len(analysis_result["violations"])} 处</p>'

    if analysis_result.get("fake_content"):
        main_card += f'<p style="margin: 8px 0; color: {colors["text"]};">• 虚假内容: {len(analysis_result["fake_content"])} 处</p>'

    if analysis_result.get("inappropriate_ads"):
        main_card += f'<p style="margin: 8px 0; color: {colors["text"]};">• 不当广告用语: {len(analysis_result["inappropriate_ads"])} 处</p>'

    if analysis_result.get("keywords_analysis"):
        main_card += f'<p style="margin: 8px 0; color: {colors["text"]};">• 关键词分析: {len(analysis_result["keywords_analysis"])} 个</p>'

    if total_issues == 0:
        main_card += f'<p style="margin: 8px 0; color: {colors["light_text"]};">✅ 未发现明显问题</p>'

    main_card += f"""
                    </div>
                </div>

                <!-- AI建议 -->
                <div style="
                    margin-top: 20px;
                    padding: 15px;
                    background-color: rgba(255,255,255,0.5);
                    border-radius: 10px;
                    border-left: 4px solid {colors['accent']};
                ">
                    <h4 style="margin: 0 0 10px 0; color: {colors['text']}; font-size: 14px;">🤖 AI建议</h4>"""

    # Show at most two recommendations to keep the card compact
    recommendations = overall.get("recommendations", ["内容表述规范"])
    for rec in recommendations[:2]:
        main_card += f'<p style="margin: 5px 0; color: {colors["light_text"]}; font-size: 13px;">• {rec}</p>'

    main_card += f"""
                </div>

                <!-- 中式印章风格装饰 -->
                <div style="
                    text-align: center;
                    margin-top: 25px;
                    padding: 10px;
                    border: 2px solid {colors['accent']};
                    border-radius: 50%;
                    width: 70px;
                    height: 70px;
                    margin-left: auto;
                    margin-right: auto;
                    display: flex;
                    align-items: center;
                    justify-content: center;
                ">
                    <span style="
                        font-size: 18px;
                        color: {colors['secondary']};
                        font-weight: bold;
                    ">AI审核</span>
                </div>
            </div>

            <!-- 背景装饰文字 -->
            <div style="
                position: absolute;
                font-size: 120px;
                color: rgba(182, 181, 167, 0.05);
                top: 50%;
                left: 50%;
                transform: translate(-50%, -50%);
                font-weight: bold;
                pointer-events: none;
                z-index: 0;
            ">AI</div>
        </div>
    </div>
    """

    return main_card
311
 
312
def generate_error_card(error_msg: str) -> str:
    """Render *error_msg* as a Hanyu-Xinxie-styled HTML error card.

    Args:
        error_msg: exception text to display.  It comes straight from
            ``str(exception)`` and may contain ``<``, ``>``, ``&`` or quotes,
            so it is HTML-escaped before being embedded — interpolating it
            raw (as the original did) breaks the markup and allows HTML
            injection into the report page.

    Returns:
        str: an HTML fragment suitable for display via ``gr.HTML``.
    """
    import html  # local import keeps the module's top-level imports unchanged

    safe_msg = html.escape(error_msg)
    return f"""
    <div style="
        display: flex;
        justify-content: center;
        padding: 20px;
        background: linear-gradient(135deg, #E8E3DE 0%, #F2EDE9 100%);
        border-radius: 15px;
        font-family: 'Microsoft YaHei', sans-serif;
    ">
        <div style="
            width: 350px;
            background-color: #F2EDE9;
            border-radius: 20px;
            box-shadow: 0 20px 40px rgba(0,0,0,0.1);
            padding: 30px;
            text-align: center;
        ">
            <h2 style="color: #B85450; margin-bottom: 15px;">⚠️ 分析出错</h2>
            <p style="color: #5B5B5B; margin-bottom: 20px;">AI分析过程中遇到问题:</p>
            <div style="
                background-color: rgba(184, 84, 80, 0.1);
                padding: 15px;
                border-radius: 10px;
                border-left: 4px solid #B85450;
                text-align: left;
            ">
                <code style="color: #B85450; font-size: 12px;">{safe_msg}</code>
            </div>
            <p style="color: #8C8C8C; margin-top: 15px; font-size: 14px;">请稍后重试或检查网络连接</p>
        </div>
    </div>
    """
346
 
347
def comprehensive_text_audit(text, keywords=""):
    """AI-powered comprehensive text audit with HTML card visualization.

    Args:
        text: the content to audit; blank/whitespace-only input
            short-circuits with a plain error string.
        keywords: comma-separated keywords to analyse ("" for none).

    Returns:
        str: an HTML fragment with the audit result (or an error card),
        or a plain error string for empty input.
    """
    if not text.strip():
        return "❌ Please enter text content"

    # Split "a, b, c" into a clean keyword list, dropping empty entries
    keyword_list = [k.strip() for k in keywords.split(",") if k.strip()] if keywords else []

    try:
        # asyncio.run() creates, runs and reliably closes a fresh event
        # loop — replaces the original's manual new_event_loop /
        # set_event_loop / close sequence with the idiomatic equivalent.
        analysis_result = asyncio.run(
            ai_analyzer.analyze_content(text, keyword_list)
        )

        # Render the structured result as a Hanyu Xinxie style card
        return generate_hanyu_xinxie_cards(analysis_result, text)

    except Exception as e:
        # Best-effort UI: surface the failure as an error card instead of
        # crashing the Gradio handler.
        print(f"Analysis error: {e}")
        return generate_error_card(str(e))
379
 
380
  # Create Gradio interface optimized for HF Spaces
381
  demo = gr.Interface(
 
392
  lines=2
393
  )
394
  ],
395
+ outputs=gr.HTML(
396
+ label="AI Audit Report"
 
 
397
  ),
398
+ title="🛡️ ContentGuardian - AI Content Audit Agent",
399
  description="""
400
+ **China · Simplified Chinese · Hanyu Xinxie Style · AI-Driven Version**
401
 
402
+ This system uses real AI analysis to detect content issues and generates beautiful Hanyu Xinxie style cards.
403
+ Powered by advanced language models for intelligent content understanding.
404
 
405
  **Key Features:**
406
+ - 🤖 AI-Powered Content Analysis
407
+ - 🔍 Intelligent Error Detection
408
+ - 📊 Beautiful SVG Card Visualization
409
  - 🎨 Hanyu Xinxie Aesthetic Style
410
  """,
411
  examples=[
412
  ["This product is absolutely effective, completely side-effect free, the first brand! Buy immediately, instant results!", "product,effect"],
413
  ["Our product quality is very good, trustworthy, welcome to purchase.", "product,quality"],
414
+ ["这款产品效果绝对好,完全无副作用,第一品牌!立即购买,马上见效!", "产品,效果"],
415
+ ["Buy now, instant effect, 100% effective, absolutely satisfying!", "buy,effect"]
416
  ],
417
  theme=gr.themes.Soft(),
418
+ flagging_mode="never"
419
  )
420
 
421
  if __name__ == "__main__":
requirements.txt CHANGED
@@ -1 +1,2 @@
1
  gradio>=4.0.0
 
 
1
  gradio>=4.0.0
2
+ aiohttp>=3.8.0