
Commit 05158c9

committed: removed duplicates
1 parent 3e02db5 commit 05158c9

File tree: 1 file changed (+2, −17 lines)


_bibliography/references.bib

Lines changed: 2 additions & 17 deletions
@@ -102,7 +102,7 @@ @misc{ho2024mapex
   primaryclass = {cs.RO},
   abstract = {Exploration is a critical challenge in robotics, centered on understanding unknown environments. In this work, we focus on robots exploring structured indoor environments which are often predictable and composed of repeating patterns. Most existing approaches, such as conventional frontier approaches, have difficulty leveraging the predictability and explore with simple heuristics such as `closest first'. Recent works use deep learning techniques to predict unknown regions of the map, using these predictions for information gain calculation. However, these approaches are often sensitive to the predicted map quality or do not reason over sensor coverage. To overcome these issues, our key insight is to jointly reason over what the robot can observe and its uncertainty to calculate probabilistic information gain. We introduce MapEx, a new exploration framework that uses predicted maps to form probabilistic sensor model for information gain estimation. MapEx generates multiple predicted maps based on observed information, and takes into consideration both the computed variances of predicted maps and estimated visible area to estimate the information gain of a given viewpoint. Experiments on the real-world KTH dataset showed on average 12.4% improvement than representative map-prediction based exploration and 25.4% improvement than nearest frontier approach.}
 }
-@inproceedings{ho2024map2,
+@inproceedings{ho2024map,
   title = {Map It Anywhere (MIA): Empowering Bird's Eye View Mapping using Large-scale Public Data},
   author = {Ho, Cherie and Zou, Jiaye and Alama, Omar and Kumar , Sai Mitheran Jagadesh and Chiang, Benjamin and Gupta, Taneesh and Wang, Chen and Keetha, Nikhil and Sycara, Katia and Scherer, Sebastian},
   year = 2024,
@@ -111,13 +111,6 @@ @inproceedings{ho2024map2
   code = {https://github.com/MapItAnywhere/MapItAnywhere},
   abstract = {Top-down Bird's Eye View (BEV) maps are a popular representation for ground robot navigation due to their richness and flexibility for downstream tasks. While recent methods have shown promise for predicting BEV maps from First-Person View (FPV) images, their generalizability is limited to small regions captured by current autonomous vehicle-based datasets. In this context, we show that a more scalable approach towards generalizable map prediction can be enabled by using two large-scale crowd-sourced mapping platforms, Mapillary for FPV images and OpenStreetMap for BEV semantic maps. We introduce Map It Anywhere (MIA), a data engine that enables seamless curation and modeling of labeled map prediction data from existing open-source map platforms. Using our MIA data engine, we display the ease of automatically collecting a 1.2 million FPV & BEV pair dataset encompassing diverse geographies, landscapes, environmental factors, camera models & capture scenarios. We further train a simple camera model-agnostic model on this data for BEV map prediction. Extensive evaluations using established benchmarks and our dataset show that the data curated by MIA enables effective pretraining for generalizable BEV map prediction, with zero-shot performance far exceeding baselines trained on existing datasets by 35%. Our analysis highlights the promise of using large-scale public maps for developing & testing generalizable BEV perception, paving the way for more robust autonomous navigation.}
 }
-@inproceedings{ho2024map1,
-  title = {Map It Anywhere (MIA): Empowering Bird's Eye View Mapping using Large-scale Public Data},
-  author = {Ho, Cherie and Zou, Jiaye and Alama, Omar and Mitheran, Sai and Chiang, Benjamin and Gupta, Taneesh and Wang, Chen and Keetha, Nikhil and Sycara, Katia and Scherer, Sebastian},
-  year = 2024,
-  booktitle = {Advances in Neural Information Processing Systems (NeurIPS)},
-  url = {https://mapitanywhere.github.io}
-}
 @misc{hughes2024multi-robot,
   title = {Multi-Robot Planning for Filming Groups of Moving Actors Leveraging Submodularity and Pixel Density},
   author = {Hughes, Skyler and Martin, Rebecca and Corah, Micah and Scherer, Sebastian},
@@ -349,15 +342,7 @@ @article{yu2024i2d-loc++
   doi = {10.1109/LRA.2024.3440851},
   keywords = {Cameras;Laser radar;Three-dimensional displays;Optical flow;Robot localization;Image motion analysis;Visualization;Camera localization;lidar maps;2d–3d correspondence;flow estimation}
 }
-@inproceedings{zhao2024subt-mrs1,
-  title = {{SubT-MRS} Dataset: Pushing SLAM Towards All-weather Environments},
-  author = {Zhao, Shibo and Gao, Yuanjun and Wu, Tianhao and Singh, Damanpreet and Jiang, Rushan and Sun, Haoxiang and Sarawata, Mansi and Whittaker, Warren C and Higgins, Ian and Su, Shaoshu and Du, Yi and Xu, Can and Keller, John and Karhade, Jay and Nogueira, Lucas and Saha, Sourojit and Qiu, Yuheng and Zhang, Ji and Wang, Wenshan and Wang, Chen and Scherer, Sebastian},
-  year = 2024,
-  booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-  url = {https://arxiv.org/pdf/2307.07607.pdf},
-  video = {https://youtu.be/mkN72Lv8S7A}
-}
-@inproceedings{zhao2024subt-mrs2,
+@inproceedings{zhao2024subt-mrs,
   title = {{SubT-MRS} Dataset: Pushing SLAM Towards All-weather Environments},
   author = {Zhao, Shibo and Gao, Yuanjun and Wu, Tianhao and Singh, Damanpreet and Jiang, Rushan and Sun, Haoxiang and Sarawata, Mansi and Whittaker, Warren C and Higgins, Ian and Su, Shaoshu and Du, Yi and Xu, Can and Keller, John and Karhade, Jay and Nogueira, Lucas and Saha, Sourojit and Qiu, Yuheng and Zhang, Ji and Wang, Wenshan and Wang, Chen and Scherer, Sebastian},
   year = 2024,
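
The duplicates dropped here (ho2024map1 vs. ho2024map2, zhao2024subt-mrs1 vs. zhao2024subt-mrs2) are separate entry keys describing the same paper. Below is a minimal, illustrative sketch for catching such cases before committing, assuming _bibliography/references.bib is laid out as in the diff above. The script is not part of this commit, and the helper name find_duplicate_titles is invented for the example; it simply flags entry keys whose normalized title fields repeat.

import re
from collections import defaultdict

# Illustrative sketch (not part of this commit): collect (entry key, title)
# pairs from a BibTeX file and report titles that appear under more than
# one entry key, e.g. ho2024map1 / ho2024map2.
ENTRY_RE = re.compile(r"@\w+\{([^,]+),", re.IGNORECASE)
TITLE_RE = re.compile(r"title\s*=\s*\{(.+?)\}\s*,?\s*$", re.IGNORECASE)

def find_duplicate_titles(path="_bibliography/references.bib"):
    titles = defaultdict(list)  # normalized title -> list of entry keys
    current_key = None
    with open(path, encoding="utf-8") as f:
        for line in f:
            m = ENTRY_RE.search(line)
            if m:
                current_key = m.group(1).strip()
                continue
            t = TITLE_RE.search(line)
            if t and current_key:
                normalized = re.sub(r"\W+", " ", t.group(1)).lower().strip()
                titles[normalized].append(current_key)
    return {title: keys for title, keys in titles.items() if len(keys) > 1}

if __name__ == "__main__":
    for title, keys in find_duplicate_titles().items():
        print(f"duplicate title: {title!r} -> keys {keys}")

Run from the repository root; any title reported under two different keys is a candidate duplicate like the two entries removed in this commit.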
