<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <!-- Social-media banner metadata: shown in link previews (the page's "business card") -->
  <meta name="description" content="ToxiCN MM: the first Chinese harmful meme dataset (12,000 fine-grained annotated samples) and the MHKE baseline detector.">
  <meta property="og:title" content="Towards Comprehensive Detection of Chinese Harmful Memes: Dataset and Detector">
  <meta property="og:description" content="ToxiCN MM, the first Chinese harmful meme dataset, and MHKE, a multimodal harmful knowledge enhancement detector.">
  <!-- TODO(review): fill in the final public URL of this project page -->
  <meta property="og:url" content="URL OF THE WEBSITE">
  <!-- Banner image; optimal dimensions are 1200x630.
       NOTE(review): path was "static/image/…" but every other asset uses "static/images/…" — unified; confirm the file exists there. -->
  <meta property="og:image" content="static/images/your_banner_image.png">
  <meta property="og:image:width" content="1200">
  <meta property="og:image:height" content="630">
  <meta name="twitter:title" content="Towards Comprehensive Detection of Chinese Harmful Memes">
  <meta name="twitter:description" content="ToxiCN MM dataset and MHKE detector for Chinese harmful meme detection.">
  <!-- Twitter banner image; optimal dimensions are 1200x600 -->
  <meta name="twitter:image" content="static/images/your_twitter_banner_image.png">
  <meta name="twitter:card" content="summary_large_image">
  <!-- Keywords for search indexing -->
  <meta name="keywords" content="harmful memes, Chinese, multimodal, dataset, hate speech detection, ToxiCN MM, MHKE">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>Towards Comprehensive Detection of Chinese Harmful Memes: Dataset and Detector</title>
  <link rel="icon" type="image/x-icon" href="static/images/favicon.ico">
  <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
        rel="stylesheet">
  <link rel="stylesheet" href="static/css/bulma.min.css">
  <link rel="stylesheet" href="static/css/bulma-carousel.min.css">
  <link rel="stylesheet" href="static/css/bulma-slider.min.css">
  <link rel="stylesheet" href="static/css/fontawesome.all.min.css">
  <link rel="stylesheet"
        href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
  <link rel="stylesheet" href="static/css/index.css">
  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
  <script src="https://documentcloud.adobe.com/view-sdk/main.js"></script>
  <script defer src="static/js/fontawesome.all.min.js"></script>
  <script src="static/js/bulma-carousel.min.js"></script>
  <script src="static/js/bulma-slider.min.js"></script>
  <script src="static/js/index.js"></script>
</head>
<body>
<section class="hero">
  <div class="hero-body">
    <div class="container is-max-desktop">
      <div class="columns is-centered">
        <div class="column has-text-centered">
          <h1 class="title is-1 publication-title">Towards Comprehensive Detection of <br>Chinese Harmful Memes: <br>Dataset and Detector</h1>
          <div class="is-size-5 publication-authors">
            <!-- Paper authors; "*" marks the corresponding author -->
            <span class="author-block"><a href="https://dut-lujunyu.github.io/" target="_blank" rel="noopener noreferrer">Junyu Lu,</a></span>
            <span class="author-block"><a href="https://xubo123456.github.io/" target="_blank" rel="noopener noreferrer">Bo Xu,</a></span>
            <span class="author-block"><a href="https://zhang-xiaokun.github.io/" target="_blank" rel="noopener noreferrer">Xiaokun Zhang,</a></span>
            <!-- NOTE(review): template placeholder href removed; link this name once a personal page is available -->
            <span class="author-block">Hongbo Wang,</span>
            <span class="author-block"><a href="https://scholar.google.com/citations?hl=zh-CN&amp;user=qL6aVHgAAAAJ" target="_blank" rel="noopener noreferrer">Haohao Zhu,</a></span><br>
            <span class="author-block"><a href="https://scholar.google.com/citations?hl=zh-CN&amp;user=K8aZDJUAAAAJ" target="_blank" rel="noopener noreferrer">Dongyu Zhang,</a></span>
            <span class="author-block"><a href="https://scholar.google.com/citations?hl=zh-CN&amp;user=AIug9aEAAAAJ" target="_blank" rel="noopener noreferrer">Liang Yang,</a></span>
            <span class="author-block"><a href="https://scholar.google.com/citations?hl=zh-CN&amp;user=kV68br0AAAAJ" target="_blank" rel="noopener noreferrer">Hongfei Lin</a><sup>*</sup></span>
          </div>
          <div class="is-size-5 publication-authors">
            <span class="author-block">Dalian University of Technology</span>
            <span class="eql-cntrb"><small><br><sup>*</sup>Corresponding author</small></span>
            <span class="eql-cntrb"><small><br><a
              href="mailto:dut_ljy@foxmail.com">dut_ljy@foxmail.com</a>, <a
              href="mailto:hflin@dlut.edu.cn">hflin@dlut.edu.cn</a></small></span>
          </div>
          <div class="column has-text-centered">
            <div class="publication-links">
              <!-- arXiv abstract link. TODO(review): replace the placeholder href with the paper's arXiv URL before publishing -->
              <span class="link-block">
                <a href="?" target="_blank" rel="noopener noreferrer"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="ai ai-arxiv"></i>
                  </span>
                  <span>arXiv</span>
                </a>
              </span>
              <!-- Hugging Face dataset link -->
              <span class="link-block">
                <a href="https://huggingface.co/datasets/JunyuLu/ToxiCN_MM" target="_blank" rel="noopener noreferrer"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    🤗
                  </span>
                  <span>Data</span>
                </a>
              </span>
              <!-- GitHub link -->
              <span class="link-block">
                <a href="https://github.com/DUT-lujunyu/ToxiCN_MM" target="_blank" rel="noopener noreferrer"
                   class="external-link button is-normal is-rounded is-dark">
                  <span class="icon">
                    <i class="fab fa-github"></i>
                  </span>
                  <span>Code</span>
                </a>
              </span>
            </div>
          </div>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- Paper abstract -->
<section class="section hero is-light">
  <div class="container is-max-desktop">
    <div class="columns is-centered has-text-centered">
      <div class="column is-four-fifths">
        <h2 class="title is-3">Abstract</h2>
        <div class="content has-text-justified">
          <p>
            Harmful memes have proliferated on the Chinese Internet, while research on detecting Chinese harmful memes significantly lags behind due to the absence of reliable datasets and effective detectors.
            To this end, we present the comprehensive detection of Chinese harmful memes.
            We introduce ToxiCN MM, the first Chinese harmful meme dataset, which consists of 12,000 samples with fine-grained annotations for meme types.
            Additionally, we propose a baseline detector, Multimodal Harmful Knowledge Enhancement (MHKE), designed to incorporate contextual information from meme content, thereby enhancing the model's understanding of Chinese memes.
            In the evaluation phase, we conduct extensive quantitative experiments and qualitative analyses on multiple baselines, including LLMs and our MHKE.
            Experimental results indicate that detecting Chinese harmful memes is challenging for existing models, while demonstrating the effectiveness of MHKE.
          </p>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- End paper abstract -->
<section class="section is-small">
  <div class="container is-max-desktop">
    <div class="columns is-centered">
      <div class="column is-full">
        <div class="content">
          <h2 class="title is-3">ToxiCN MM</h2>
          <!-- Dataset overview figure; deprecated <center> replaced with Bulma's has-text-centered helper -->
          <div class="has-text-centered">
            <!-- NOTE(review): class "centert-image" looks like a typo for "center-image" — confirm against static/css/index.css before renaming -->
            <img src="static/images/introduction_7_small.png" alt="Overview of the ToxiCN MM dataset: harmful types and modality combination features" class="centert-image">
          </div>
          <div class="level-set has-text-justified">
            <p>
              ToxiCN MM contains 12,000 diverse samples collected from Chinese social platforms.
              In addition to the basic binary labels (i.e., harmful or non-harmful), we provide fine-grained annotations for harmful memes at two levels of granularity: <i>harmful type</i> and <i>modality combination feature</i>.<br>
              For the harmful type, we focus on both <i>targeted harmful memes</i> and those exhibiting potential toxicity without specific targets, including <i>general offense</i>, <i>sexual innuendo</i>, and <i>dispirited culture</i>.
              These memes are identified as the most common harmful types of memes on Chinese platforms based on the consensus of social psychology. Their harm to individuals and society has been widely discussed.<br>
              For the modality combination feature, we examine how harmful memes convey toxicity through the interplay of textual and visual elements, either combined or independently, including <i>text-image fusion</i>, <i>harmful text</i>, and <i>harmful image</i>.
            </p>
          </div>
        </div>
      </div>
    </div>
  </div>
</section>
<section class="section is-small">
  <div class="container is-max-desktop">
    <div class="columns is-centered">
      <div class="column is-full">
        <div class="content">
          <h2 class="title is-3">MHKE Detector</h2>
          <!-- Method architecture figure; deprecated <center> replaced with Bulma's has-text-centered helper -->
          <div class="has-text-centered">
            <!-- NOTE(review): class "centert-image" looks like a typo for "center-image" — confirm against static/css/index.css before renaming -->
            <img src="static/images/method.png" alt="Architecture of the Multimodal Harmful Knowledge Enhancement (MHKE) detector" class="centert-image" style="width: 70%; height: auto;">
          </div>
          <div class="level-set has-text-justified">
            <p>
              For the detector development, we present a Multimodal Harmful Knowledge Enhancement (MHKE) detector, intuitively introducing the contextual information of meme content.
              We utilize the large language model (LLM) to capture the context of both the text and image of the meme, leveraging its extensive knowledge acquired through pre-training.
              This information is then integrated into a trainable detector as enhanced captions to improve the understanding of the meme.
            </p>
          </div>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- Author statement on dataset responsibility and ethics -->
<section class="section is-small">
  <div class="container is-max-desktop">
    <div class="columns is-centered">
      <div class="column is-full">
        <div class="content">
          <h2 class="title is-3">Author Statement</h2>
          <div class="level-set has-text-justified">
            <p>
              We, the authors of ToxiCN MM, hereby declare that we take full responsibility for any infringement of rights that may arise from the use of this dataset.
              Our study aims to facilitate the comprehensive detection of Chinese harmful memes and raise researchers' attention to non-English memes.
              We believe the benefits of our proposed resources outweigh the associated risks.
              We strictly follow the data use agreements of each public online social platform.
              It is important to note that all data has been anonymized and does not include any personal information.
              The opinions and findings contained in the samples of our presented dataset should not be interpreted as representing the views expressed or implied by the authors.
            </p>
          </div>
        </div>
      </div>
    </div>
  </div>
</section>
<!-- BibTeX citation -->
<section class="section" id="BibTeX">
  <div class="container is-max-desktop content">
    <h2 class="title">BibTeX</h2>
    <!-- TODO(review): fill in year/venue/eprint once the paper's publication details are final;
         title and author list below are taken from this page's header -->
    <pre><code>@article{lu_toxicn_mm,
  title   = {Towards Comprehensive Detection of Chinese Harmful Memes: Dataset and Detector},
  author  = {Lu, Junyu and Xu, Bo and Zhang, Xiaokun and Wang, Hongbo and Zhu, Haohao and Zhang, Dongyu and Yang, Liang and Lin, Hongfei},
  year    = {},
  journal = {}
}</code></pre>
  </div>
</section>
<!-- End BibTeX citation -->
<footer class="footer">
  <div class="container">
    <div class="columns is-centered">
      <div class="column is-8">
        <div class="content">
          <p>
            This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template" target="_blank" rel="noopener noreferrer">Academic Project Page Template</a> which was adopted from the <a href="https://nerfies.github.io" target="_blank" rel="noopener noreferrer">Nerfies</a> project page.
            You are free to borrow the source code of this website, we just ask that you link back to this page in the footer. <br> This website is licensed under a <a rel="license noopener noreferrer" href="https://creativecommons.org/licenses/by-sa/4.0/" target="_blank">Creative
            Commons Attribution-ShareAlike 4.0 International License</a>.
          </p>
        </div>
      </div>
    </div>
  </div>
</footer>
<!-- Statcounter tracking code -->
<!-- You can add a tracker to track page visits by creating an account at statcounter.com -->
<!-- End of Statcounter Code -->
</body>
</html>