
Commit 9c7c0ab

Merge pull request #52 from mahrahimi1/patch-2
Update publications.bib
2 parents: 5150075 + 68a368d

File tree: 1 file changed (+19, -0 lines)


publications.bib

Lines changed: 19 additions & 0 deletions
@@ -1,3 +1,22 @@
+@inproceedings{rahimi-etal-2025-relation,
+    title = "Relation-Aware Prompting Makes Large Language Models Effective Zero-shot Relation Extractors",
+    author = "Rahimi, Mahdi and
+      Dumitru, Razvan-Gabriel and
+      Surdeanu, Mihai",
+    editor = "Frermann, Lea and
+      Stevenson, Mark",
+    booktitle = "Proceedings of the 14th Joint Conference on Lexical and Computational Semantics (*SEM 2025)",
+    month = nov,
+    year = "2025",
+    address = "Suzhou, China",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2025.starsem-1.22/",
+    doi = "10.18653/v1/2025.starsem-1.22",
+    pages = "280--292",
+    ISBN = "979-8-89176-340-1",
+    abstract = "While supervised relation extraction (RE) models have considerably advanced the state-of-the-art, they often perform poorly in low-resource settings. Zero-shot RE is vital when annotations are not available either due to costs or time constraints. As a result, zero-shot RE has garnered interest in the research community. With the advent of large language models (LLMs) many approaches have been proposed for prompting LLMs for RE, but these methods often either rely on an accompanying small language model (e.g., for finetuning on synthetic data generated by LLMs) or require complex post-prompt processing. In this paper, we propose an effective prompt-based method that does not require any additional resources. Instead, we use an LLM to perform a two-step process. In the first step, we perform a targeted summarization of the text with respect to the underlying relation, reduce the applicable label space, and synthesize examples. Then, we combine the products of these processes with other elements into a final prompt. We evaluate our approach with various LLMs on four real-world RE datasets. Our evaluation shows that our method outperforms the previous state-of-the-art zero-shot methods by a large margin. This work can also be considered as a new strong baseline for zero-shot RE that is compatible with any LLM."
+}
+
 @inproceedings{abrar-etal-2025-repPIDDL,
     title = "On the Reproducibility of Provenance-based Intrusion Detection that uses Deep Learning",
     author = "Abrar, Talha and

0 commit comments
