
Commit d51a896

add full publication
1 parent 02d8849 commit d51a896

File tree

10 files changed, +133 -32 lines changed


app/join/page.tsx

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@ export default function JoinPage() {
     <div className={styles.container}>
       For perspective students, I appreciate reading the following before
       reaching out to me through email. To make it easier for me to identify the
-      applications, use "PhD (or Postdoc, Visiting Student) Application" as your
+      applications, use "PhD (or Postdoc, Visiting Student) Applications" as your
       title. Due to the abundance of application emails, I might not be able to
       always respond to the email. But if you believe that you possess the
       credentials and quality mentioned below, feel free to remind me if you

app/page.tsx

Lines changed: 1 addition & 1 deletion
@@ -130,7 +130,7 @@ export default function Home() {
           title="Multi-Modal Foundation Model"
         />
         <ResearchDirection image="/trustworthy.webp" title="Trustworthy AI" />
-        <ResearchDirection image="/application.webp" title="Application" />
+        <ResearchDirection image="/application.webp" title="Applications" />
       </div>
     </>
   );

app/publications/page.tsx

Lines changed: 2 additions & 2 deletions
@@ -4,7 +4,7 @@ import { Link } from "@nextui-org/link";
 import { useSearchParams } from "next/navigation";
 
 import { publications } from "@/config/publications";
-import { ApplicationTags } from "@/components/tag";
+import { PublicationTags } from "@/components/tag";
 
 export default function PublicationsPage() {
   const searchParams = useSearchParams();
@@ -26,7 +26,7 @@ export default function PublicationsPage() {
         <div className="my-4">
           <div className="flex flex-row gap-4">
             <div>
-              <ApplicationTags tags={publication.tags} />
+              <PublicationTags tags={publication.tags} />
             </div>
             <div>
               <h2 className="font-bold text-xl">{publication.title}</h2>
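The page pairs the renamed import with the useSearchParams hook shown above. A minimal sketch of how a ?tag= query parameter could drive the filtering — the parameter name and filter logic below are assumptions for illustration, not necessarily what the page does:

"use client";

import { useSearchParams } from "next/navigation";

import { publications } from "@/config/publications";

// Hypothetical helper: keep only publications whose tags include the ?tag= value,
// falling back to the full list when no tag is present in the URL.
function useFilteredPublications() {
  const searchParams = useSearchParams();
  const tag = searchParams.get("tag");

  return tag
    ? publications.filter((p) => p.tags.some((t) => t === tag))
    : publications;
}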

components/icons.tsx

Lines changed: 20 additions & 0 deletions
@@ -33,6 +33,26 @@ export const DiscordIcon: React.FC<IconSvgProps> = ({
   );
 };
 
+
+export const LinkedinIcon: React.FC<IconSvgProps> = ({
+  size = 21,
+  width,
+  height,
+  ...props
+}) => {
+  return (
+    <svg
+      viewBox="0 0 448 512"
+      height={size || height}
+      width={size || width}
+      {...props}
+    >
+      <path d="M416 32H31.9C14.3 32 0 46.5 0 64.3v383.4C0 465.5 14.3 480 31.9 480H416c17.6 0 32-14.5 32-32.3V64.3c0-17.8-14.4-32.3-32-32.3zM135.4 416H69V202.2h66.5V416zm-33.2-243c-21.3 0-38.5-17.3-38.5-38.5S80.9 96 102.2 96c21.2 0 38.5 17.3 38.5 38.5 0 21.3-17.2 38.5-38.5 38.5zm282.1 243h-66.4V312c0-24.8-.5-56.7-34.5-56.7-34.6 0-39.9 27-39.9 54.9V416h-66.4V202.2h63.7v29.2h.9c8.9-16.8 30.6-34.5 62.9-34.5 67.2 0 79.7 44.3 79.7 101.9V416z" fill="currentColor" />
+    </svg>
+  );
+};
+
+
 export const TwitterIcon: React.FC<IconSvgProps> = ({
   size = 24,
   width,
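A quick usage sketch of the new icon — the 18-pixel size and the wrapper component are illustrative only; when size is omitted it falls back to 21:

import { LinkedinIcon } from "@/components/icons";

// Renders the LinkedIn glyph; extra props such as className are spread onto the <svg>.
export const ContactIcon = () => (
  <LinkedinIcon size={18} className="text-default-500" />
);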

components/navbar.tsx

Lines changed: 4 additions & 4 deletions
@@ -12,7 +12,7 @@ import clsx from "clsx";
 
 import { siteConfig } from "@/config/site";
 import { ThemeSwitch } from "@/components/theme-switch";
-import { GithubIcon, Logo } from "@/components/icons";
+import { GithubIcon, Logo, LinkedinIcon } from "@/components/icons";
 
 export const Navbar = () => {
   return (
@@ -49,9 +49,9 @@ export const Navbar = () => {
         justify="end"
       >
         <NavbarItem className="hidden sm:flex gap-2">
-          {/* <Link isExternal aria-label="Twitter" href={siteConfig.links.twitter}>
-            <TwitterIcon className="text-default-500" />
-          </Link> */}
+          <Link isExternal aria-label="Linkedin" href={siteConfig.links.linkedin}>
+            <LinkedinIcon className="text-default-500" />
+          </Link>
 
           <Link isExternal aria-label="Github" href={siteConfig.links.github}>
             <GithubIcon className="text-default-500" />

components/tag.tsx

Lines changed: 6 additions & 6 deletions
@@ -5,23 +5,23 @@ import React from "react";
 
 import { Tag } from "@/config/publications";
 
-function ApplicationTags({ tags }: { tags: Tag[] }) {
+function PublicationTags({ tags }: { tags: Tag[] }) {
   return (
     <>
       {tags.map((tag) => (
-        <ApplicationTag key={tag} tag={tag} />
+        <PublicationTag key={tag} tag={tag} />
       ))}
     </>
   );
 }
 
-function ApplicationTag({ tag }: { tag: Tag }) {
+function PublicationTag({ tag }: { tag: Tag }) {
   let name;
   let color;
 
   switch (tag) {
-    case Tag.Application:
-      name = "Application";
+    case Tag.Applications:
+      name = "Applications";
       color = "#ffe119";
       break;
     case Tag.TrustworthyAI:
@@ -45,4 +45,4 @@ function ApplicationTag({ tag }: { tag: Tag }) {
   );
 }
 
-export { ApplicationTags };
+export { PublicationTags };
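Downstream usage stays the same apart from the new names; a small illustrative sketch (the tag list here is made up):

import { PublicationTags } from "@/components/tag";
import { Tag } from "@/config/publications";

// Renders one colored chip per tag, e.g. "Applications" uses the #ffe119 color above.
const ExampleTags = () => (
  <PublicationTags tags={[Tag.Applications, Tag.TrustworthyAI]} />
);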

config/publications.ts

Lines changed: 96 additions & 16 deletions
@@ -2,7 +2,7 @@ export enum Tag {
   GraphRepresentationLearning = "Graph Representation Learning",
   MultiModalFoundationModel = "Multi-Modal Foundation Model",
   TrustworthyAI = "Trustworthy AI",
-  Application = "Application",
+  Applications = "Applications",
 }
 
 export interface Publication {
@@ -16,6 +16,36 @@ export interface Publication {
 }
 
 export const publications = [
+  {
+    title: "HEART: Learning Better Representation of EHR Data with a Heterogeneous Relation-Aware Transformer",
+    authors: "T Huang, SA Rizvi, R Krishna Thakur, V Socrates, M Gupta, D Dijk, RA Taylor, R Ying",
+    venue: "Preprint",
+    code: null,
+    paper: "https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4904741",
+    abstract: "We propose HEART, a pretrained language model for structured EHR data. HEART seamlessly encodes heterogeneous medical entity information through a novel relation embedding module and a multi-level attention scheme.",
+    impact: "This novel pretrained framework, featuring a new architecture and dedicated objectives, can inspire future research on foundation models in EHR.",
+    tags: [Tag.Applications],
+  },
+  {
+    title: "Protein-Nucleic Acid Complex Modeling with Frame Averaging Transformer",
+    authors: "Tinglin Huang, Zhenqiao Song, Rex Ying, Wengong Jin",
+    venue: "MLSB Workshop, NeurIPS, 2023",
+    code: "https://github.com/Graph-and-Geometric-Learning/Frame-Averaging-Transformer",
+    paper: "https://arxiv.org/abs/2406.09586",
+    abstract: "Propose a novel unsupervised aptamer screening paradigm and FAFormer, a frame averaging-based equivariant transformer architecture.",
+    impact: "We explore a new angle to conduct aptamer screening in an unsupervised manner by leveraging the strong correlation with the contact map prediction task. Besides, we propose to integrate Frame Averaging (FA) within each transformer module and develop FAFormer, highlighting a new possibility for geometric encoder design in this domain.",
+    tags: [Tag.Applications],
+  },
+  {
+    title: "From Similarity to Superiority: Channel Clustering for Time Series Forecasting",
+    authors: "Jialin Chen, Jan Eric Lenssen, Aosong Feng, Weihua Hu, Matthias Fey, Leandros Tassiulas, Jure Leskovec, Rex Ying",
+    venue: "Arxiv Preprint",
+    code: null,
+    paper: "https://arxiv.org/pdf/2404.01340",
+    abstract: "We developed a novel and adaptable Channel Clustering Module (CCM), which dynamically groups channels characterized by intrinsic similarities and leverages cluster identity, instead of channel identity, to improve time series forecasting performance.",
+    impact: "Extensive experiments demonstrate that CCM with mainstream time series forecasting models can (1) boost the performance of time series forecasting by an average margin of 2.4% and 7.2% on long-term and short-term forecasting; (2) enable more accurate zero-shot forecasting; (3) uncover intrinsic time series patterns among channels and improve interpretability of complex time series models.",
+    tags: [],
+  },
   {
     title: "Learning High-Order Relationships of Brain Regions",
     authors:
@@ -24,26 +54,21 @@ export const publications = [
     page: null,
     paper: "https://arxiv.org/abs/2312.02203",
     code: "https://github.com/Graph-and-Geometric-Learning/HyBRiD",
-    tags: [Tag.Application, Tag.GraphRepresentationLearning],
+    tags: [Tag.Applications, Tag.GraphRepresentationLearning],
     abstract:
       "Traditional methods only focus on pariwise connectivity of brain regions. We proposed a new framework based on information bottleneck that learns high-order relationships of brain regions.",
     impact:
       "The learned high-order relationships achieve SOTA performance on predictive tasks and are demonstrated significantly more effective than traditional pairwise methods.",
   },
   {
-    title:
-      "D4Explainer: In-distribution explanations of graph neural network via discrete denoising diffusion",
-    authors: "Jialin Chen, Shirley Wu, Abhijit Gupta, Rex Ying",
-    venue: "NeurIPS 2023",
-    page: null,
-    paper:
-      "https://proceedings.neurips.cc/paper_files/paper/2023/hash/f978c8f3b5f399cae464e85f72e28503-Abstract-Conference.html",
-    code: "https://github.com/Graph-and-Geometric-Learning/D4Explainer",
-    tags: [Tag.TrustworthyAI],
-    abstract:
-      "We propose D4Explainer, a novel approach that provides in-distribution GNN explanations for both counterfactual and model-level explanation scenarios.",
-    impact:
-      "D4Explainer is the first unified framework that combines both counterfactual and model-level explanations. Empirical evaluations on synthetic and real-world datasets demonstrate the state-of-the-art performance achieved by D4Explainer in terms of explanation accuracy, faithfulness, diversity, and robustness.",
+    title: "Online Detection of Anomalies in Temporal Knowledge Graphs with Interpretability",
+    authors: "Jiasheng Zhang, Rex Ying, Jie Shao",
+    venue: "SIGMOD 2025",
+    code: "https://github.com/zjs123/ANoT",
+    paper: "https://arxiv.org/abs/2408.00872",
+    abstract: "we introduce AnoT, an efficient TKG summarization method tailored for interpretable online anomaly detection in TKGs. AnoT begins by summarizing a TKG into a novel rule graph, enabling flexible inference of complex patterns in TKGs.",
+    impact: "The first attempt at strategies to summarize a temporal knowledge graph and first explore how to inductively detect anomalies in TKG.",
+    tags: [Tag.GraphRepresentationLearning],
   },
   {
     title: "DTGB: A Comprehensive Benchmark for Dynamic Text-Attributed Graphs",
@@ -53,11 +78,11 @@ export const publications = [
     page: null,
     paper: "https://arxiv.org/abs/2406.12072",
     code: "https://github.com/zjs123/DTGB",
-    tags: [Tag.GraphRepresentationLearning],
     abstract:
       "we introduce Dynamic Text-attributed Graph Benchmark (DTGB), a collection of large-scale, time-evolving graphs from diverse domains, with nodes and edges enriched by dynamically changing text attributes and categories.",
     impact:
       "he proposed DTGB fosters research on DyTAGs and their broad applications. It offers a comprehensive benchmark for evaluating and advancing models to handle the interplay between dynamic graph structures and natural language.",
+    tags: [Tag.GraphRepresentationLearning],
   },
   {
     title:
@@ -73,5 +98,60 @@ export const publications = [
       "Hypformer, a new hyperbolic Transformer based on the Lorentz model of hyperbolic geometry, addresses existing limitations with foundational modules and a linear self-attention mechanism, demonstrating effectiveness and scalability across various datasets.",
     impact:
       "Hypformer represents a significant advancement in the application of hyperbolic geometry to large-scale data representation, enabling the processing of billion-scale graph data and long-sequence inputs with hyperbolic geometry.",
+  },
+  {
+    title: "Explaining Graph Neural Networks via Structure-aware Interaction Index",
+    authors: "Ngoc Bui, Hieu Trung Nguyen, Viet Anh Nguyen, Rex Ying",
+    venue: "ICML 2024",
+    code: "https://github.com/ngocbh/MAGE",
+    paper: "https://arxiv.org/abs/2405.14352",
+    abstract: "We introduces a novel interaction index, namely the Myerson-Taylor interaction index, that internalizes the graph structure into attributing the node values of Shapley value and the interaction values among nodes. We prove that that the Myerson-Taylor index is the unique one that satisfies a system of five natural axioms accounting for graph structure and high-order interaction among nodes. We propose MAGE, a new graph explainer that uses the second-order Myerson-Taylor index to identify the most important motifs influencing the model prediction.",
+    impact: "Myerson-Taylor interaction index is the unique generalization of the Shapley and Myerson values to account for both graph structure and high-order interaction among nodes. MAGE is also the first graph explainer that leverages (high-) second-order interaction index to identify multiple explainatory motifs for GNNs.",
+    tags: [Tag.TrustworthyAI],
+  },
+  {
+    title: "TempMe: Towards the explainability of temporal graph neural networks via motif discovery",
+    authors: "Jialin Chen, Rex Ying",
+    venue: "NeurIPS 2023",
+    code: "https://github.com/Graph-and-Geometric-Learning/TempME",
+    paper: "https://proceedings.neurips.cc/paper_files/paper/2023/hash/5c5bc3553815adb4d1a8a5b8701e41a9-Abstract-Conference.html",
+    tags: [Tag.TrustworthyAI],
+    abstract: "Derived from the information bottleneck principle, we propose a novel approach, called Temporal Motifs Explainer (TempME), which uncovers the most pivotal temporal motifs guiding the prediction of TGNNs.",
+    impact: "Events in the explanations generated by TempME are verified to be more spatiotemporally correlated than those of existing approaches, with up to 8.21% increase in terms of explanation accuracy across six real-world datasets and up to 22.96% increase in boosting the prediction Average Precision of current TGNNs.",
+  },
+  {
+    title:
+      "D4Explainer: In-distribution explanations of graph neural network via discrete denoising diffusion",
+    authors: "Jialin Chen, Shirley Wu, Abhijit Gupta, Rex Ying",
+    venue: "NeurIPS 2023",
+    page: null,
+    paper:
+      "https://proceedings.neurips.cc/paper_files/paper/2023/hash/f978c8f3b5f399cae464e85f72e28503-Abstract-Conference.html",
+    code: "https://github.com/Graph-and-Geometric-Learning/D4Explainer",
+    tags: [Tag.TrustworthyAI],
+    abstract:
+      "We propose D4Explainer, a novel approach that provides in-distribution GNN explanations for both counterfactual and model-level explanation scenarios.",
+    impact:
+      "D4Explainer is the first unified framework that combines both counterfactual and model-level explanations. Empirical evaluations on synthetic and real-world datasets demonstrate the state-of-the-art performance achieved by D4Explainer in terms of explanation accuracy, faithfulness, diversity, and robustness.",
+  },
+  {
+    title: "Learning to Group Auxiliary Datasets for Molecule",
+    authors: "Tinglin Huang, Ziniu Hu, Rex Ying",
+    venue: "NeurIPS 2023",
+    code: "https://github.com/Graph-and-Geometric-Learning/MolGroup",
+    paper: "https://arxiv.org/abs/2307.04052",
+    abstract: "Propose a routing-based auxiliary dataset grouping method to enhance model performance on molecule datasets with limited labels.",
+    impact: "The proposed auxiliary dataset grouping introduces a new paradigm for enhancing model performance on downstream tasks. Besides, our proposed MolGroup is model-agnostic, which can significantly improve model's performance across 11 molecule datasets.",
+    tags: [Tag.Applications],
+  },
+  {
+    title: "BatchSampler: Sampling Mini-Batches for Contrastive Learning in Vision, Language, and Graphs",
+    authors: "Zhen Yang, Tinglin Huang, Ming Ding, Yuxiao Dong, Rex Ying, Yukuo Cen, Yangliao Geng, Jie Tang",
+    venue: "SIGKDD 2023",
+    code: "https://github.com/THUDM/BatchSampler",
+    paper: "https://arxiv.org/abs/2306.03355v1",
+    abstract: "Propose a global negative sampling method for contrastive learning by modeling negative sampling as graph sampling on a proximity graph.",
+    impact: "The proposed algorithm is theoretically guaranteed and can consistently improve nine contrastive learning methods across graph, image, and language modalities.",
+    tags: []
   },
 ];
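For reference, a small sketch of how the exported list and the renamed enum member can be consumed, for example to count entries per research direction (the helper below is illustrative, not part of the site):

import { publications, Tag } from "@/config/publications";

// Count publications carrying a given tag; an entry may carry several tags.
const countByTag = (tag: Tag) =>
  publications.filter((p) => p.tags.some((t) => t === tag)).length;

console.log(`${Tag.Applications}: ${countByTag(Tag.Applications)}`);
console.log(`${Tag.GraphRepresentationLearning}: ${countByTag(Tag.GraphRepresentationLearning)}`);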

config/site.ts

Lines changed: 1 addition & 0 deletions
@@ -28,5 +28,6 @@ export const siteConfig = {
   links: {
     github: "https://github.com/Graph-and-Geometric-Learning",
     twitter: "https://twitter.com/getnextui",
+    linkedin: "https://www.linkedin.com/in/rex-ying-92770148/",
   },
 };

next.config.js

Lines changed: 0 additions & 1 deletion
@@ -1,6 +1,5 @@
 /** @type {import('next').NextConfig} */
 const nextConfig = {
-  output: 'export',
   images: {
     formats: ["image/avif", "image/webp"],
     remotePatterns: [

package.json

Lines changed: 2 additions & 1 deletion
@@ -33,7 +33,8 @@
     "next": "^14.2.9",
     "next-themes": "^0.2.1",
     "react": "^18.3.1",
-    "react-dom": "^18.3.1"
+    "react-dom": "^18.3.1",
+    "react-icons": "^5.3.0"
   },
   "devDependencies": {
     "@types/node": "20.5.7",
