@article{chen2025deesd,
  title    = {DE-ESD: Dual encoder-based entity synonym discovery using pre-trained contextual embeddings},
  author   = {Huang, Subin and Chen, Junjie and Yu, Chengzhen and Li, Daoyu and Zhou, Qing and Liu, Sanmin},
  journal  = {Expert Systems with Applications},
  volume   = {276},
  pages    = {127102},
  year     = {2025},
  issn     = {0957-4174},
  doi      = {10.1016/j.eswa.2025.127102},
  url      = {https://www.sciencedirect.com/science/article/pii/S0957417425007249},
  keywords = {Entity synonym set, Entity synonym discovery, Dual encoder, Pre-trained language model, Contextual embedding},
}
TaylorSeer
From Reusing to Forecasting: Accelerating Diffusion Models with TaylorSeers
Jiacheng Liu, Chang Zou, Yuanhuiyi Lyu, Junjie Chen, and Linfeng Zhang
@article{TaylorSeer2025,
  title   = {From Reusing to Forecasting: Accelerating Diffusion Models with TaylorSeers},
  author  = {Liu, Jiacheng and Zou, Chang and Lyu, Yuanhuiyi and Chen, Junjie and Zhang, Linfeng},
  journal = {arXiv preprint arXiv:2503.06923},
  year    = {2025},
  url     = {https://arxiv.org/abs/2503.06923},
}
SarcasmAnalysis
Seeing Sarcasm Through Different Eyes: Analyzing Multimodal Sarcasm Perception in Large Vision-Language Models
Junjie Chen, Xuyang Liu, Subin Huang, Linfeng Zhang, and Hang Yu
@article{chen2025seeingsarcasmdifferenteyes,
  title   = {Seeing Sarcasm Through Different Eyes: Analyzing Multimodal Sarcasm Perception in Large Vision-Language Models},
  author  = {Chen, Junjie and Liu, Xuyang and Huang, Subin and Zhang, Linfeng and Yu, Hang},
  journal = {arXiv preprint arXiv:2503.12149},
  year    = {2025},
  url     = {https://arxiv.org/abs/2503.12149},
}
2024
InterCLIP-MEP
InterCLIP-MEP: Interactive CLIP and Memory-Enhanced Predictor for Multi-modal Sarcasm Detection
Junjie Chen, Hang Yu, Subin Huang, Sanmin Liu, and Linfeng Zhang
@article{chen2024interclipmep,
  title   = {InterCLIP-MEP: Interactive CLIP and Memory-Enhanced Predictor for Multi-modal Sarcasm Detection},
  author  = {Chen, Junjie and Yu, Hang and Huang, Subin and Liu, Sanmin and Zhang, Linfeng},
  journal = {arXiv preprint arXiv:2406.16464},
  year    = {2024},
  url     = {https://arxiv.org/abs/2406.16464},
}