@@ -332,6 +332,8 @@ <h2>About<a class="headerlink" href="#about" title="Link to this heading">#</a><
 <section id="news">
 <h2>News<a class="headerlink" href="#news" title="Link to this heading">#</a></h2>
 <ul class="simple">
+<li><p>[2025.11] 3 papers accepted to AAAI26</p></li>
+<li><p>[2025.11] HoloMotion V1.0 released!</p></li>
 <li><p>[2025.09] 2 papers accepted to NeurIPS25</p></li>
 <li><p>[2025.06] 1 paper accepted to IROS25</p></li>
 <li><p>[2025.06] In <a class="reference external" href="https://robotwin-benchmark.github.io/cvpr-2025-challenge/#challenge-details">RoboTwin Dual-Arm Collaboration Challenge 2nd MEIS Workshop@CVPR2025</a>, we won <strong>🥇first place</strong> in <strong>Real-world Track</strong> and <strong>🥈second place</strong> in <strong>Simulation Track</strong></p></li>
@@ -350,7 +352,7 @@ <h3>RoboOrchard<a class="headerlink" href="#roboorchard" title="Link to this hea
 <col style="width: 80.0%" />
 </colgroup>
 <tbody>
-<tr class="row-odd"><td><a class="reference external image-reference" href="_static/projects/robo_orchard/logo.png"><img alt="RoboOrchardLab Logo" class="publication-thumbnail" src="_images/logo.png" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/projects/robo_orchard/logo.png"><img alt="RoboOrchard Logo" class="publication-thumbnail" src="_images/logo.png" />
 </a>
 </td>
 <td><p><strong>RoboOrchard</strong> is a collection of tools and frameworks designed to streamline the entire process of robotics research and development,
@@ -392,25 +394,34 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <col style="width: 80.0%" />
 </colgroup>
 <tbody>
-<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/HRDT.gif"><img alt="HRDT Logo" class="publication-thumbnail" src="_images/HRDT.gif" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/fsr-vln.gif"><img alt="FSR-VLN Logo" class="publication-thumbnail" src="_images/fsr-vln.gif" />
+</a>
+</td>
+<td><p><strong>FSR-VLN: Fast and Slow Reasoning for Vision-Language Navigation with Hierarchical Multi-modal Scene Graph</strong></p>
+<p><span class="blue">Xiaolin Zhou</span>, <span class="blue">Tingyang Xiao</span>, <span class="blue">Liu Liu</span>, <span class="blue">Yucheng Wang</span>, <span class="blue">Maiyue Chen</span>, <span class="blue">Xinrui Meng</span>, <span class="blue">Xinjie Wang</span>, <span class="blue">Wei Sui</span>, <span class="blue">Zhizhong Su</span></p>
+<p>Arxiv 2025</p>
+<p><a class="reference external" href="https://horizonrobotics.github.io/robot_lab/fsr-vln/">Webpage</a> | <a class="reference external" href="https://arxiv.org/pdf/2509.13733">Paper</a> | <a class="reference external" href="https://github.com/HorizonRobotics/HoBotBrain">Code</a></p>
+</td>
+</tr>
+<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/HRDT.gif"><img alt="HRDT Logo" class="publication-thumbnail" src="_images/HRDT.gif" />
 </a>
 </td>
 <td><p><strong>H-RDT: Human Manipulation Enhanced Bimanual Robotic Manipulation</strong></p>
 <p><span class="blue">Hongzhe Bi</span>, Lingxuan Wu, <span class="blue">Tianwei Lin</span>, Hengkai Tan, <span class="blue">Zhizhong Su</span>, Hang Su, Jun Zhu</p>
-<p>Arxiv 2025</p>
+<p>AAAI 2026</p>
 <p><a class="reference external" href="https://embodiedfoundation.github.io/hrdt">Webpage</a> | <a class="reference external" href="https://arxiv.org/abs/2507.23523">Paper</a> | <a class="reference external" href="https://github.com/HongzheBi/H_RDT">Code</a></p>
 </td>
 </tr>
-<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/monodream.gif"><img alt="MonoDream Logo" class="publication-thumbnail" src="_images/monodream.gif" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/monodream.gif"><img alt="MonoDream Logo" class="publication-thumbnail" src="_images/monodream.gif" />
 </a>
 </td>
 <td><p><strong>MonoDream: Monocular Vision-Language Navigation with Panoramic Dreaming</strong></p>
 <p><span class="blue">Shuo Wang</span>, Yongcai Wang, Wanting Li, <span class="blue">Yucheng Wang</span>, <span class="blue">Maiyue Chen</span>, <span class="blue">Kaihui Wang</span>, <span class="blue">Zhizhong Su</span>, Xudong Cai, Yeying Jin, Deying Li, Zhaoxin Fan</p>
-<p>Arxiv 2025</p>
+<p>AAAI 2026</p>
 <p><a class="reference external" href="https://horizonrobotics.github.io/robot_lab/monodream/">Webpage</a> | <a class="reference external" href="https://arxiv.org/abs/2508.02549">Paper</a></p>
 </td>
 </tr>
-<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/Uni3R.gif"><img alt="Uni3R Logo" class="publication-thumbnail" src="_images/Uni3R.gif" />
+<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/Uni3R.gif"><img alt="Uni3R Logo" class="publication-thumbnail" src="_images/Uni3R.gif" />
 </a>
 </td>
 <td><p><strong>Uni3R: Unified 3D Reconstruction and Semantic Understanding via Generalizable Gaussian Splatting from Unposed Multi-View Images</strong></p>
@@ -419,16 +430,16 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://horizonrobotics.github.io/robot_lab/uni3R/">Webpage</a> | <a class="reference external" href="https://arxiv.org/pdf/2508.03643">Paper</a> | <a class="reference external" href="https://github.com/HorizonRobotics/Uni3R">Code</a></p>
 </td>
 </tr>
-<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/IGFuse.gif"><img alt="IGFuse Logo" class="publication-thumbnail" src="_images/IGFuse.gif" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/IGFuse.gif"><img alt="IGFuse Logo" class="publication-thumbnail" src="_images/IGFuse.gif" />
 </a>
 </td>
 <td><p><strong>IGFuse: Interactive 3D Gaussian Scene Reconstruction via Multi-Scans Fusion</strong></p>
 <p><span class="blue">Wenhao Hu</span>, Zesheng Li, Haonan Zhou, <span class="blue">Liu Liu</span>, Xuexiang Wen, <span class="blue">Zhizhong Su</span>, Xi Li, Gaoang Wang</p>
-<p>Arxiv 2025</p>
+<p>AAAI 2026 Oral</p>
 <p><a class="reference external" href="https://whhu7.github.io/IGFuse/">Webpage</a> | <a class="reference external" href="https://arxiv.org/pdf/2508.13153">Paper</a> | <a class="reference external" href="https://github.com/whhu7/IGFuse-code">Code</a></p>
 </td>
 </tr>
-<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/DreamLifting.gif"><img alt="DreamLifting Logo" class="publication-thumbnail" src="_images/DreamLifting.gif" />
+<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/DreamLifting.gif"><img alt="DreamLifting Logo" class="publication-thumbnail" src="_images/DreamLifting.gif" />
 </a>
 </td>
 <td><p><strong>DreamLifting: A Plug-in Module Lifting MV Diffusion Models for 3D Asset Generation</strong></p>
@@ -437,7 +448,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://zx-yin.github.io/dreamlifting">Webpage</a></p>
 </td>
 </tr>
-<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/finegrasp.gif"><img alt="FineGrasp Logo" class="publication-thumbnail" src="_images/finegrasp.gif" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/finegrasp.gif"><img alt="FineGrasp Logo" class="publication-thumbnail" src="_images/finegrasp.gif" />
 </a>
 </td>
 <td><p><strong>FineGrasp: Towards Robust Grasping for Delicate Objects</strong></p>
@@ -446,7 +457,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://horizonrobotics.github.io/robot_lab/finegrasp/index.html">Webpage</a> | <a class="reference external" href="https://arxiv.org/abs/2507.05978">Paper</a> | <a class="reference external" href="https://github.com/HorizonRobotics/robo_orchard_lab/tree/master/projects/finegrasp_graspnet1b">Code</a></p>
 </td>
 </tr>
-<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/embodied_gen.gif"><img alt="EmbodiedGen Logo" class="publication-thumbnail" src="_images/embodied_gen.gif" />
+<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/embodied_gen.gif"><img alt="EmbodiedGen Logo" class="publication-thumbnail" src="_images/embodied_gen.gif" />
 </a>
 </td>
 <td><p><strong>EmbodiedGen: Towards a Generative 3D World Engine for Embodied Intelligence</strong></p>
@@ -455,7 +466,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://horizonrobotics.github.io/robot_lab/embodied_gen/index.html">Webpage</a> | <a class="reference external" href="https://arxiv.org/abs/2506.10600">Paper</a> | <a class="reference external" href="https://github.com/HorizonRobotics/EmbodiedGen">Code</a> | <a class="reference external" href="https://huggingface.co/collections/HorizonRobotics/embodiedgen-684c3ec6cfd908bb1d6069bf">HF Demo</a></p>
 </td>
 </tr>
-<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/robotransfer.gif"><img alt="RoboTransfer Logo" class="publication-thumbnail" src="_images/robotransfer.gif" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/robotransfer.gif"><img alt="RoboTransfer Logo" class="publication-thumbnail" src="_images/robotransfer.gif" />
 </a>
 </td>
 <td><p><strong>RoboTransfer: Geometry-Consistent Video Diffusion for Robotic Visual Policy Transfer</strong></p>
@@ -464,7 +475,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://horizonrobotics.github.io/robot_lab/robotransfer/index.html">Webpage</a> | <a class="reference external" href="https://arxiv.org/abs/2505.23171">Paper</a> | <a class="reference external" href="https://github.com/HorizonRobotics/RoboTransfer">Code</a></p>
 </td>
 </tr>
-<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/dipo.gif"><img alt="DIPO Logo" class="publication-thumbnail" src="_images/dipo.gif" />
+<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/dipo.gif"><img alt="DIPO Logo" class="publication-thumbnail" src="_images/dipo.gif" />
 </a>
 </td>
 <td><p><strong>DIPO: Dual-State Images Controlled Articulated Object Generation Powered by Diverse Data</strong></p>
@@ -473,7 +484,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://rq-wu.github.io/projects/DIPO">Webpage</a> | <a class="reference external" href="https://arxiv.org/abs/2505.20460">Paper</a> | <a class="reference external" href="https://github.com/RQ-Wu/DIPO">Code</a></p>
 </td>
 </tr>
-<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/sem.gif"><img alt="SEM Logo" class="publication-thumbnail" src="_images/sem.gif" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/sem.gif"><img alt="SEM Logo" class="publication-thumbnail" src="_images/sem.gif" />
 </a>
 </td>
 <td><p><strong>SEM: Enhancing Spatial Understanding for Robust Robot Manipulation</strong></p>
@@ -482,7 +493,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://arxiv.org/abs/2505.16196">Paper</a> | <a class="reference external" href="https://github.com/HorizonRobotics/robo_orchard_lab/tree/master/projects/sem">Code</a></p>
 </td>
 </tr>
-<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/aux-think.gif"><img alt="AuxThink Logo" class="publication-thumbnail" src="_images/aux-think.gif" />
+<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/aux-think.gif"><img alt="AuxThink Logo" class="publication-thumbnail" src="_images/aux-think.gif" />
 </a>
 </td>
 <td><p><strong>Aux-Think: Exploring Reasoning Strategies for Data-Efficient Vision-Language Navigation</strong></p>
@@ -491,7 +502,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://horizonrobotics.github.io/robot_lab/aux-think/index.html">Webpage</a> | <a class="reference external" href="https://arxiv.org/abs/2505.11886">Paper</a> | <a class="reference external" href="https://github.com/HorizonRobotics/robo_orchard_lab/tree/master/projects/aux_think">Code</a></p>
 </td>
 </tr>
-<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/GeomFlow.gif"><img alt="GeomFlow Logo" class="publication-thumbnail" src="_images/GeomFlow.gif" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/GeomFlow.gif"><img alt="GeomFlow Logo" class="publication-thumbnail" src="_images/GeomFlow.gif" />
 </a>
 </td>
 <td><p><strong>GeoFlow-SLAM: A Robust Tightly-Coupled RGBD-Inertial Fusion SLAM for Dynamic Legged Robotics</strong></p>
@@ -500,7 +511,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://arxiv.org/abs/2503.14247">Paper</a> | <a class="reference external" href="https://github.com/HorizonRobotics/GeoFlowSlam">Code</a></p>
 </td>
 </tr>
-<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/gstr.gif"><img alt="GaussTR Logo" class="publication-thumbnail" src="_images/gstr.gif" />
+<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/gstr.gif"><img alt="GaussTR Logo" class="publication-thumbnail" src="_images/gstr.gif" />
 </a>
 </td>
 <td><p><strong>GaussTR: Foundation model-aligned gaussian transformer for self-supervised 3d spatial understanding</strong></p>
@@ -509,7 +520,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://hustvl.github.io/GaussTR/">Webpage</a> | <a class="reference external" href="https://arxiv.org/abs/2412.13193">Paper</a> | <a class="reference external" href="https://github.com/hustvl/GaussTR">Code</a></p>
 </td>
 </tr>
-<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/bip3d.gif"><img alt="BIP3D Logo" class="publication-thumbnail" src="_images/bip3d.gif" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/bip3d.gif"><img alt="BIP3D Logo" class="publication-thumbnail" src="_images/bip3d.gif" />
 </a>
 </td>
 <td><p><strong>BIP3D: Bridging 2d images and 3d perception for embodied intelligence</strong></p>
@@ -518,7 +529,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://horizonrobotics.github.io/robot_lab/bip3d/index.html">Webpage</a> | <a class="reference external" href="https://arxiv.org/abs/2411.14869">Paper</a> | <a class="reference external" href="https://github.com/HorizonRobotics/robo_orchard_lab/tree/master/projects/bip3d_grounding">Code</a></p>
 </td>
 </tr>
-<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/goc.gif"><img alt="GOC Logo" class="publication-thumbnail" src="_images/goc.gif" />
+<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/goc.gif"><img alt="GOC Logo" class="publication-thumbnail" src="_images/goc.gif" />
 </a>
 </td>
 <td><p><strong>Gaussian Object Carver: Object-Compositional Gaussian Splatting with surfaces completion</strong></p>
@@ -527,7 +538,7 @@ <h2>Publications<a class="headerlink" href="#publications" title="Link to this h
 <p><a class="reference external" href="https://arxiv.org/abs/2412.02075">Paper</a></p>
 </td>
 </tr>
-<tr class="row-even"><td><a class="reference external image-reference" href="_static/publications/gls.gif"><img alt="GLS Logo" class="publication-thumbnail" src="_images/gls.gif" />
+<tr class="row-odd"><td><a class="reference external image-reference" href="_static/publications/gls.gif"><img alt="GLS Logo" class="publication-thumbnail" src="_images/gls.gif" />
 </a>
 </td>
 <td><p><strong>GLS: Geometry-aware 3D Language Gaussian Splatting</strong></p>