See
Google Scholar page for a complete list.
World Model on Million-Length Video And Language With Blockwise RingAttention
Hao Liu*, Wilson Yan*, Matei Zaharia, Pieter Abbeel
arXiv, 2024
bib |
paper |
code |
project |
tl;dr
@article{liu2024world,
  title         = {World Model on Million-Length Video and Language with Blockwise {RingAttention}},
  author        = {Liu, Hao and Yan, Wilson and Zaharia, Matei and Abbeel, Pieter},
  journal       = {arXiv preprint arXiv:2402.08268},
  eprint        = {2402.08268},
  archiveprefix = {arXiv},
  primaryclass  = {cs.LG},
  year          = {2024},
}
Ring Attention with Blockwise Transformers for Near-Infinite Context
Hao Liu, Matei Zaharia, Pieter Abbeel
International Conference on Learning Representations (ICLR), 2024
bib |
paper |
code |
media |
tl;dr
@inproceedings{liu2023ring,
  title     = {Ring Attention with Blockwise Transformers for Near-Infinite Context},
  author    = {Liu, Hao and Zaharia, Matei and Abbeel, Pieter},
  booktitle = {International Conference on Learning Representations ({ICLR})},
  year      = {2024},
}
Blockwise Parallel Transformer for Large Context Models
Hao Liu, Pieter Abbeel
Advances in Neural Information Processing Systems (NeurIPS) (Spotlight Presentation), 2023
bib |
paper | code |
tl;dr
@inproceedings{liu2023blockwise,
  title     = {Blockwise Parallel Transformer for Large Context Models},
  author    = {Liu, Hao and Abbeel, Pieter},
  booktitle = {Advances in Neural Information Processing Systems},
  year      = {2023},
}
Language Quantized AutoEncoders: Towards Unsupervised Text-Image Alignment
Hao Liu, Wilson Yan, Pieter Abbeel
Advances in Neural Information Processing Systems (NeurIPS), 2023
bib |
paper | code |
tl;dr
@inproceedings{liu2023language,
  title     = {Language Quantized {AutoEncoders}: Towards Unsupervised Text-Image Alignment},
  author    = {Liu, Hao and Yan, Wilson and Abbeel, Pieter},
  booktitle = {Advances in Neural Information Processing Systems},
  year      = {2023},
}
Chain of Hindsight Aligns Language Models with Feedback
Hao Liu, Carmelo Sferrazza, Pieter Abbeel
International Conference on Learning Representations (ICLR), 2024
bib |
paper | code |
tl;dr
@inproceedings{liu2023chain,
  title     = {Chain of Hindsight Aligns Language Models with Feedback},
  author    = {Liu, Hao and Sferrazza, Carmelo and Abbeel, Pieter},
  booktitle = {International Conference on Learning Representations ({ICLR})},
  year      = {2024},
}
Emergent Agentic Transformer from Chain of Hindsight Experience
Hao Liu, Pieter Abbeel
International Conference on Machine Learning (ICML), 2023
bib |
paper |
tl;dr
@inproceedings{liu2023emergent,
  title     = {Emergent Agentic Transformer from Chain of Hindsight Experience},
  author    = {Liu, Hao and Abbeel, Pieter},
  booktitle = {International Conference on Machine Learning},
  year      = {2023},
}
Masked Autoencoding for Scalable and Generalizable Decision Making
Fangchen Liu*, Hao Liu*, Aditya Grover, Pieter Abbeel
Advances in Neural Information Processing Systems (NeurIPS), 2022
bib |
paper | code |
tl;dr
@inproceedings{liu2022masked,
  title     = {Masked Autoencoding for Scalable and Generalizable Decision Making},
  author    = {Liu, Fangchen and Liu, Hao and Grover, Aditya and Abbeel, Pieter},
  booktitle = {Advances in Neural Information Processing Systems},
  year      = {2022},
}
Palm up: Playing in the Latent Manifold for Unsupervised Pretraining
Hao Liu, Tom Zahavy, Volodymyr Mnih, Satinder Singh
Advances in Neural Information Processing Systems (NeurIPS), 2022
bib |
paper |
tl;dr
@inproceedings{liu2022palm,
  title     = {Palm up: Playing in the Latent Manifold for Unsupervised Pretraining},
  author    = {Liu, Hao and Zahavy, Tom and Mnih, Volodymyr and Singh, Satinder},
  booktitle = {Advances in Neural Information Processing Systems},
  year      = {2022},
}
URLB: Unsupervised Reinforcement Learning Benchmark
Michael Laskin, Denis Yarats, Hao Liu, Kimin Lee, Albert Zhan, Kevin Lu, Catherine Cang,
Lerrel Pinto, Pieter Abbeel
NeurIPS 2021 Track Datasets and Benchmarks, 2021
bib |
paper | code |
tl;dr
@inproceedings{laskin2021urlb,
  title         = {{URLB}: Unsupervised Reinforcement Learning Benchmark},
  author        = {Laskin, Michael and Yarats, Denis and Liu, Hao and Lee, Kimin and Zhan, Albert and Lu, Kevin and Cang, Catherine and Pinto, Lerrel and Abbeel, Pieter},
  booktitle     = {NeurIPS Datasets and Benchmarks Track},
  eprint        = {2110.15191},
  archiveprefix = {arXiv},
  year          = {2021},
}
APS: Active Pre-Training with Successor Features
Hao Liu, Pieter Abbeel
International Conference on Machine Learning (ICML) (Long Oral Presentation), 2021
bib |
paper | code
@inproceedings{liu2021aps,
  title     = {{APS}: Active Pre-Training with Successor Features},
  author    = {Liu, Hao and Abbeel, Pieter},
  booktitle = {International Conference on Machine Learning},
  year      = {2021},
}
Behavior From the Void: Unsupervised Active Pre-Training
Hao Liu, Pieter Abbeel
Advances in Neural Information Processing Systems (NeurIPS) (Spotlight Presentation), 2021
bib |
paper | code |
tl;dr
@inproceedings{liu2021behavior,
  title     = {Behavior From the Void: Unsupervised Active Pre-Training},
  author    = {Liu, Hao and Abbeel, Pieter},
  booktitle = {Advances in Neural Information Processing Systems},
  year      = {2021},
}