Chaoqi Liu1, Haonan Chen1, Sigmund H. Høeg2, Shaoxiong Yao1, Yunzhu Li3, Kris Hauser1, Yilun Du4
1University of Illinois Urbana-Champaign  2Norwegian University of Science and Technology  3Columbia University  4Harvard University
IEEE Robotics and Automation Letters (RA-L), 2026
# Clone the repository
git clone https://github.com/Chaoqi-LIU/fdp.git
cd fdp
# Install uv if you haven't already
curl -LsSf https://astral.sh/uv/install.sh | sh
# Sync dependencies and install the package
uv sync
# Editable install of the repo root (it contains the pyproject / package);
# the previous ".fdp/" path was a typo — no such directory exists.
uv pip install -e .
├── fdp/
│   ├── common/            # Shared utilities (checkpointing, logging, replay buffer, etc.)
│   ├── config/            # Hydra config files
│   │   └── train_factorpolicy.yaml
│   ├── dataset/           # Dataset loading utilities
│   ├── env/               # Environment implementations
│   │   └── rlbench/       # RLBench environment wrapper
│   ├── env_runner/        # Environment runners for evaluation
│   ├── gymnasium_util/    # Gymnasium wrappers (async/sync vector envs, video recording)
│   ├── model/
│   │   ├── common/        # Normalizers, LR schedulers, etc.
│   │   └── diffusion/     # Transformer and UNet backbone implementations
│   ├── perception/        # Observation encoders (vision + state)
│   ├── policy/
│   │   └── factorpolicy.py  # FactorizedDiffusionTransformerPolicy (main model)
│   └── workspace/
│       └── train_policy.py  # Training entrypoint
@ARTICLE{liu2026factorizeddiffusionpolicy,
  author={Liu, Chaoqi and Chen, Haonan and H{\o}eg, Sigmund H. and Yao, Shaoxiong and Li, Yunzhu and Hauser, Kris and Du, Yilun},
  journal={IEEE Robotics and Automation Letters},
  title={Flexible Multitask Learning With Factorized Diffusion Policy},
  year={2026},
  volume={11},
  number={4},
  pages={4697-4704},
  doi={10.1109/LRA.2026.3664611}
}