Commit 4c3f0ec

Update pub.bib (#291)
1 parent db2fe44 commit 4c3f0ec

1 file changed, +31 -0 lines changed

source/_data/pub.bib

@@ -1,3 +1,34 @@
+@Article{Chen_arXiv_2025_p2506.00880,
+  author   = {Zhuo Chen and Yizhen Zheng and Huan Yee Koh and Hongxin Xiang and
+              Linjiang Chen and Wenjie Du and Yang Wang},
+  title    = {{ModuLM: Enabling Modular and Multimodal Molecular Relational Learning
+              with Large Language Models}},
+  journal  = {arXiv},
+  year     = 2025,
+  pages    = {2506.00880},
+  doi      = {10.48550/arXiv.2506.00880},
+  abstract = {Molecular Relational Learning (MRL) aims to understand interactions
+              between molecular pairs, playing a critical role in advancing
+              biochemical research. With the recent development of large language
+              models (LLMs), a growing number of studies have explored the
+              integration of MRL with LLMs and achieved promising results. However,
+              the increasing availability of diverse LLMs and molecular structure
+              encoders has significantly expanded the model space, presenting major
+              challenges for benchmarking. Currently, there is no LLM framework that
+              supports both flexible molecular input formats and dynamic
+              architectural switching. To address these challenges, reduce redundant
+              coding, and ensure fair model comparison, we propose ModuLM, a
+              framework designed to support flexible LLM-based model construction
+              and diverse molecular representations. ModuLM provides a rich suite of
+              modular components, including 8 types of 2D molecular graph encoders,
+              11 types of 3D molecular conformation encoders, 7 types of interaction
+              layers, and 7 mainstream LLM backbones. Owing to its highly flexible
+              model assembly mechanism, ModuLM enables the dynamic construction of
+              over 50,000 distinct model configurations. In addition, we provide
+              comprehensive results to demonstrate the effectiveness of ModuLM in
+              supporting LLM-based MRL tasks.},
+}
+
 @Article{Zeng_JChemTheoryComput_2025_v21_p4375,
   author = {Jinzhe Zeng and Duo Zhang and Anyang Peng and Xiangyu Zhang and Sensen
             He and Yan Wang and Xinzijian Liu and Hangrui Bi and Yifan Li and Chun
