---
---
Buttons:
- `abbr`: Adds an abbreviation to the left of the entry. You can add links to these by creating a `venue.yaml` file in the `_data` folder and adding entries that match.
- `abstract`: Adds an "Abs" button that, when clicked, expands a hidden text field showing the abstract.
- `altmetric`: Adds an [Altmetric](https://www.altmetric.com/) badge. (Note: if a DOI is provided, just use `true`; otherwise add only the Altmetric identifier here. The link is generated automatically.)
- `annotation`: Adds a popover info message at the end of the author list, which can be used, e.g., to clarify superscripts. HTML is allowed.
- `arxiv`: Adds a link to the arXiv page. (Note: add only the arXiv identifier here; the link is generated automatically.)
- `bibtex_show`: Adds a "Bib" button that expands a hidden text field with the full bibliography entry.
- `blog`: Adds a "Blog" button redirecting to the specified link.
- `code`: Adds a "Code" button redirecting to the specified link.
- `dimensions`: Adds a [Dimensions](https://www.dimensions.ai/) badge. (Note: if a DOI or PMID is provided, just use `true`; otherwise add only the Dimensions identifier here. The link is generated automatically.)
- `html`: Adds an "HTML" button redirecting to the user-specified link.
- `pdf`: Adds a "PDF" button redirecting to a specified file. (If a full link is not specified, the file is assumed to be in the /assets/pdf/ directory.)
- `poster`: Adds a "Poster" button redirecting to a specified file. (If a full link is not specified, the file is assumed to be in the /assets/pdf/ directory.)
- `slides`: Adds a "Slides" button redirecting to a specified file. (If a full link is not specified, the file is assumed to be in the /assets/pdf/ directory.)
- `supp`: Adds a "Supp" button redirecting to a specified file. (If a full link is not specified, the file is assumed to be in the /assets/pdf/ directory.)
- `video`: Adds a "Video" button redirecting to a specified file. (If a full link is not specified, the file is assumed to be in the /assets/video/ directory.)
- `website`: Adds a "Website" button redirecting to the specified link.
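For illustration, a minimal, hypothetical entry using several of these button fields might look as follows. The citation key, arXiv identifier, file name, and links are all placeholders, not a real publication:

```bibtex
@article{doe_example_2024,
title = {An {Example} {Entry}},
author = {Doe, Jane},
journal = {Journal of Examples},
year = {2024},
abbr = {JEx 2024},
abstract = {One or two sentences shown when the "Abs" button is clicked.},
bibtex_show = true,
arxiv = {0000.00000},
pdf = {doe_example_2024.pdf},
code = {https://github.com/example/repo},
website = {https://example.com},
}
```

Here `pdf` is a bare file name, so it would be looked up under /assets/pdf/, while `code` and `website` take full links and `arxiv` takes only the identifier.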
Extra:
- `preview`: File name of a preview image shown next to the entry (see the `preview` fields in the entries below).
- `google_scholar_id`: The paper's Google Scholar identifier, used to link the entry to its Google Scholar record.
- `additional_info`: Extra text appended to the citation (e.g., presentation notes; see the entries below).
- `award`
- `award_name`
- `eprint`
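Similarly, a sketch of the extra fields in use, again with placeholder values only. The `preview`, `google_scholar_id`, and `additional_info` fields mirror their usage in the real entries below; no entry here uses `award` or `award_name`, so their values are merely a guess at plausible usage:

```bibtex
@inproceedings{doe_extras_2024,
title = {Another {Example} {Entry}},
author = {Doe, Jane},
booktitle = {Proceedings of an Example Conference},
year = {2024},
abbr = {ExConf 2024},
bibtex_show = true,
preview = {doe_extras_2024.png},
google_scholar_id = {XXXXXXXXXXXX},
additional_info = {*Also presented at an example workshop*},
award = {Best Paper Award at an example conference},
award_name = {Best Paper},
}
```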
@misc{tinarrage_empirical_2024,
title = {Empirical analysis of {Binding} {Precedent} efficiency in the {Brazilian} {Supreme} {Court} via {Similar} {Case} {Retrieval}},
url = {https://arxiv.org/abs/2407.07004},
doi = {10.48550/arXiv.2407.07004},
abstract = {Binding precedents (Súmulas Vinculantes) constitute a juridical instrument unique to the Brazilian legal system and whose objectives include the protection of the Federal Supreme Court against repetitive demands. Studies of the effectiveness of these instruments in decreasing the Court's exposure to similar cases, however, indicate that they tend to fail in such a direction, with some of the binding precedents seemingly creating new demands. We empirically assess the legal impact of five binding precedents, 11, 14, 17, 26 and 37, at the highest court level through their effects on the legal subjects they address. This analysis is only possible through the comparison of the Court's ruling about the precedents' themes before they are created, which means that these decisions should be detected through techniques of Similar Case Retrieval. The contributions of this article are therefore twofold: on the mathematical side, we compare the uses of different methods of Natural Language Processing -- TF-IDF, LSTM, BERT, and regex -- for Similar Case Retrieval, whereas on the legal side, we contrast the inefficiency of these binding precedents with a set of hypotheses that may justify their repeated usage. We observe that the deep learning models performed significantly worse in the specific Similar Case Retrieval task and that the reasons for binding precedents to fail in responding to repetitive demand are heterogeneous and case-dependent, making it impossible to single out a specific cause.},
language = {en},
urldate = {2024-07-30},
publisher = {arXiv},
author = {Tinarrage, Raphaël and Ennes, Henrique and Resck, Lucas E. and Gomes, Lucas T. and Ponciano, Jean R. and Poco, Jorge},
month = jul,
year = {2024},
abbr = {arXiv},
arxiv = {2407.07004},
bibtex_show = true,
pdf = {https://arxiv.org/pdf/2407.07004},
google_scholar_id = {3fE2CSJIrl8C},
preview = {tinarrage_empirical_2024.png},
}
@inproceedings{resck_exploring_2024,
address = {Mexico City, Mexico},
title = {Exploring the {Trade}-off {Between} {Model} {Performance} and {Explanation} {Plausibility} of {Text} {Classifiers} {Using} {Human} {Rationales}},
url = {https://aclanthology.org/2024.findings-naacl.262},
doi = {10.18653/v1/2024.findings-naacl.262},
abstract = {Saliency post-hoc explainability methods are important tools for understanding increasingly complex NLP models. While these methods can reflect the model's reasoning, they may not align with human intuition, making the explanations not plausible. In this work, we present a methodology for incorporating rationales, which are text annotations explaining human decisions, into text classification models. This incorporation enhances the plausibility of post-hoc explanations while preserving their faithfulness. Our approach is agnostic to model architectures and explainability methods. We introduce the rationales during model training by augmenting the standard cross-entropy loss with a novel loss function inspired by contrastive learning. By leveraging a multi-objective optimization algorithm, we explore the trade-off between the two loss functions and generate a Pareto-optimal frontier of models that balance performance and plausibility. Through extensive experiments involving diverse models, datasets, and explainability methods, we demonstrate that our approach significantly enhances the quality of model explanations without causing substantial (sometimes negligible) degradation in the original model's performance.},
language = {English},
urldate = {2024-07-01},
booktitle = {Findings of the {Association} for {Computational} {Linguistics}: {NAACL} 2024},
publisher = {Association for Computational Linguistics},
author = {Resck, Lucas and Raimundo, Marcos M. and Poco, Jorge},
editor = {Duh, Kevin and Gomez, Helena and Bethard, Steven},
month = jun,
year = {2024},
additional_info = {*. Also presented as a poster at the LatinX in NLP at NAACL 2024 workshop*},
pages = {4190--4216},
abbr = {NAACL Findings 2024},
bibtex_show = true,
pdf = {https://aclanthology.org/2024.findings-naacl.262.pdf},
code = {https://github.com/visual-ds/plausible-nlp-explanations},
selected = {true},
html = {https://aclanthology.org/2024.findings-naacl.262},
poster = {publications/resck_exploring_2024_poster.pdf},
video = {https://youtu.be/P3X6Axtq7CY},
slides = {publications/resck_exploring_2024_slides.pdf},
google_scholar_id = {5nxA0vEk-isC},
preview = {resck_exploring_2024.png},
}
@mastersthesis{domingues_balancing_2024,
address = {Rio de Janeiro, Brazil},
title = {Balancing performance and explanation plausibility: a multi-objective approach to text classification with human rationales},
shorttitle = {Balancing performance and explanation plausibility},
url = {https://hdl.handle.net/10438/35362},
abstract = {Saliency post-hoc explainability methods are important tools for understanding increasingly complex NLP models. While these methods can reflect the model's reasoning, they may not align with human intuition, making the explanations not plausible. In this work, we present a methodology for incorporating rationales, which are text annotations explaining human decisions, into text classification models. This incorporation enhances the plausibility of post-hoc explanations while preserving their faithfulness. Our approach is agnostic to model architectures and explainability methods. We introduce the rationales during model training by augmenting the standard cross-entropy loss with a novel loss function inspired by contrastive learning. By leveraging a multi-objective optimization algorithm, we explore the trade-off between the two loss functions and generate a Pareto-optimal frontier of models that balance performance and plausibility. Through extensive experiments involving diverse models, datasets, and explainability methods, we demonstrate that our approach significantly enhances the quality of model explanations without causing substantial (sometimes negligible) degradation in the original model's performance.},
language = {eng},
urldate = {2024-07-04},
school = {Fundação Getulio Vargas},
author = {Domingues, Lucas Emanuel Resck},
month = may,
year = {2024},
abbr = {MSc thesis},
bibtex_show = true,
html = {https://hdl.handle.net/10438/35362},
pdf = {https://repositorio.fgv.br/server/api/core/bitstreams/eb8738f8-1935-4e41-92bc-465a6230f0ee/content},
code = {https://github.com/visual-ds/plausible-nlp-explanations},
google_scholar_id = {MXK_kJrjxJIC},
preview = {domingues_balancing_2024.png},
}
@inproceedings{pereira_distill_2023-1,
address = {Valencia, Spain},
series = {Proceedings of {Machine} {Learning} {Research}},
title = {Distill n' {Explain}: explaining graph neural networks using simple surrogates},
volume = {206},
shorttitle = {Distill n' {Explain}},
url = {https://proceedings.mlr.press/v206/pereira23a.html},
abstract = {Explaining node predictions in graph neural networks (GNNs) often boils down to finding graph substructures that preserve predictions. Finding these structures usually implies back-propagating through the GNN, bonding the complexity (e.g., number of layers) of the GNN to the cost of explaining it. This naturally begs the question: Can we break this bond by explaining a simpler surrogate GNN? To answer the question, we propose Distill n' Explain (DnX). First, DnX learns a surrogate GNN via knowledge distillation. Then, DnX extracts node or edge-level explanations by solving a simple convex program. We also propose FastDnX, a faster version of DnX that leverages the linear decomposition of our surrogate model. Experiments show that DnX and FastDnX often outperform state-of-the-art GNN explainers while being orders of magnitude faster. Additionally, we support our empirical findings with theoretical results linking the quality of the surrogate model (i.e., distillation error) to the faithfulness of explanations.},
language = {en},
urldate = {2023-04-30},
booktitle = {Proceedings of {The} 26th {International} {Conference} on {Artificial} {Intelligence} and {Statistics}},
publisher = {PMLR},
author = {Pereira, Tamara and Nascimento, Erik and Resck, Lucas E. and Mesquita, Diego and Souza, Amauri},
month = apr,
year = {2023},
pages = {6199--6214},
abbr = {AISTATS 2023},
bibtex_show = true,
html = {https://proceedings.mlr.press/v206/pereira23a.html},
pdf = {https://proceedings.mlr.press/v206/pereira23a/pereira23a.pdf},
code = {https://github.com/tamararruda/DnX},
poster = {publications/pereira_distill_2023-1_poster.pdf},
video = {https://youtu.be/DsTpJfjz6BQ},
selected = {true},
google_scholar_id = {UebtZRa9Y70C},
preview = {pereira_distill_2023-1.jpg},
}
@article{resck_legalvis_2023,
title = {{LegalVis}: {Exploring} and {Inferring} {Precedent} {Citations} in {Legal} {Documents}},
volume = {29},
issn = {1941-0506},
shorttitle = {{LegalVis}},
url = {https://ieeexplore.ieee.org/document/9716779/},
doi = {10.1109/TVCG.2022.3152450},
abstract = {To reduce the number of pending cases and conflicting rulings in the Brazilian Judiciary, the National Congress amended the Constitution, allowing the Brazilian Supreme Court (STF) to create binding precedents (BPs), i.e., a set of understandings that both Executive and lower Judiciary branches must follow. The STF's justices frequently cite the 58 existing BPs in their decisions, and it is of primary relevance that judicial experts could identify and analyze such citations. To assist in this problem, we propose LegalVis, a web-based visual analytics system designed to support the analysis of legal documents that cite or could potentially cite a BP. We model the problem of identifying potential citations (i.e., non-explicit) as a classification problem. However, a simple score is not enough to explain the results; that is why we use an interpretability machine learning method to explain the reason behind each identified citation. For a compelling visual exploration of documents and BPs, LegalVis comprises three interactive visual components: the first presents an overview of the data showing temporal patterns, the second allows filtering and grouping relevant documents by topic, and the last one shows a document's text aiming to interpret the model's output by pointing out which paragraphs are likely to mention the BP, even if not explicitly specified. We evaluated our identification model and obtained an accuracy of 96\%; we also made a quantitative and qualitative analysis of the results. The usefulness and effectiveness of LegalVis were evaluated through two usage scenarios and feedback from six domain experts.},
language = {English},
number = {6},
urldate = {2024-04-28},
journal = {IEEE Transactions on Visualization and Computer Graphics},
author = {Resck*, Lucas E. and Ponciano*, Jean R. and Nonato, Luis Gustavo and Poco, Jorge},
month = jun,
year = {2023},
additional_info = {*. Presented at Visualization \& Visual Analytics (VIS) 2022*},
keywords = {Data visualization, Task analysis, Visual analytics, Law, Natural language processing, Legal documents, natural language processing, Analytical models, Text analysis, Brazilian legal system, visual analytics},
pages = {3105--3120},
abbr = {TVCG 2023},
bibtex_show = true,
html = {https://ieeexplore.ieee.org/document/9716779/},
pdf = {https://arxiv.org/pdf/2203.02001},
video = {https://www.youtube.com/watch?v=0qY_NxLSGBk&t=26071s},
selected = {true},
annotation = {* The first two authors contributed equally to this work.},
google_scholar_id = {ufrVoPGSRksC},
preview = {resck_legalvis_2023.png},
}
@phdthesis{domingues_inferring_2021,
address = {Rio de Janeiro, Brazil},
type = {BSc thesis},
title = {Inferring and {Explaining} {Potential} {Citations} to {Binding} {Precedents} in {Brazilian} {Supreme} {Court} {Decisions}},
url = {https://hdl.handle.net/10438/31845},
abstract = {The Brazilian Supreme Court (STF) is the highest law court in Brazil and it is primarily responsible for guarding the Brazilian Constitution. To reduce judicial insecurity and the high Court’s workload, a Constitutional Amendment from 2004 allowed STF to create binding precedents (“Súmulas Vinculantes,” BPs). A BP is a statement that consolidates the understanding of STF about a legal matter and has mandatory application for lower branches of the Judiciary. Frequently, an STF Justice cites a BP in a decision, and it is trivial to search for these explicit citations using regular expressions. However, it is not trivial to assert whether a decision potentially cites the statement, in the sense of “it should have cited it, but it did not” or “it addresses a similar issue, so they are related.” This work explores machine learning and natural language processing (NLP) algorithms to infer and explain these potential citations. The inference is performed using models from classical machine learning theory and recent NLP research, and the explanation is achieved using a machine learning explainability technique. The models learn what characterizes a citation through training on documents with explicit citations, in which we demonstrate they achieve high performance. We present two case studies that demonstrate the usefulness of the trained models to search for potential citations when accompanied by the explainability technique to inform the most relevant parts of the document for the potential citation assignment.},
language = {eng},
urldate = {2022-07-21},
school = {Fundação Getulio Vargas},
author = {Domingues, Lucas Emanuel Resck},
month = dec,
year = {2021},
abbr = {BSc thesis},
bibtex_show = true,
html = {https://hdl.handle.net/10438/31845},
pdf = {https://repositorio.fgv.br/server/api/core/bitstreams/a1a7d243-3f54-4557-8b4f-35665db72af9/content},
google_scholar_id = {_FxGoFyzp5QC},
preview = {domingues_inferring_2021.png},
}
@techreport{domingues_circuits_2018,
address = {Varginha, Brazil},
type = {Scientific initiation project report},
title = {Circuits for {Driving} {Low} {Power} {Direct} {Current} {Motors}},
language = {pt},
institution = {Federal Center for Technological Education of Minas Gerais},
author = {Domingues, Lucas Emanuel Resck and Blahun, Júlia Gandini},
year = {2018},
additional_info = {*Original title in Portuguese: "Circuitos para Acionamento de Motores de Corrente Contínua de Baixa Potência"*},
pages = {32},
abbr = {Report},
bibtex_show = true,
google_scholar_id = {roLk4NBRz8UC},
}
@techreport{blahun_brazilian_2016,
address = {Varginha, Brazil},
type = {Scientific initiation project report},
title = {Brazilian {Robotics} {Olympiad} – {OBR}’2016, {Level} {II} {Practical} {Modality}},
language = {pt},
institution = {Federal Center for Technological Education of Minas Gerais},
author = {Blahun, Júlia Gandini and Regina, Luiza de Souza Pinto and Domingues, Lucas Emanuel Resck},
year = {2016},
additional_info = {*Original title in Portuguese: "Olimpíada Brasileira de Robótica – OBR’2016, Modalidade Prática de Nível II"*},
pages = {13},
abbr = {Report},
bibtex_show = true,
google_scholar_id = {LkGwnXOMwfcC},
}