Learning complex navigation behaviors in simulation and successfully transferring them to the real world remains a significant challenge due to the sim-to-real gap. To address this issue, we propose a hybrid reconstruction approach that combines Light Detection and Ranging (LiDAR) data and monocular images to create visually and structurally realistic simulated environments. Specifically, we extract an accurate mesh from LiDAR measurements and integrate it with geometry-consistent Three-Dimensional Gaussian Splatting (3DGS) to render photorealistic images. Additionally, we present an end-to-end neural network designed to generate control commands for a mobile robot, which we use to assess how effectively the hybrid reconstructions narrow the sim-to-real gap. In the proposed model, we introduce a novel auxiliary branch and define loss functions to improve the network’s ability to learn safe and robust actions. The method, which is based on imitation learning, is evaluated by deploying it on a real robot to navigate over a total distance of 3.5 kilometers, and experimental results show that models trained purely in simulation achieve performance comparable to those trained on real-world data.
% Journal article (in press volume 172, article number 114372).
% Names use braced LaTeX special characters so classic BibTeX sorts and
% labels accented surnames correctly; DOI is stored bare (no resolver URL).
@article{olivas2026eaai,
  author  = {Olivas, Alejandro and Mu{\~n}oz-Ba{\~n}{\'o}n, Miguel {\'A}ngel and Torres, Fernando},
  title   = {Sim-to-real transfer by hybrid {Gaussian Splatting} and geometric reconstruction for autonomous driving},
  journal = {Engineering Applications of Artificial Intelligence},
  volume  = {172},
  pages   = {114372},
  year    = {2026},
  issn    = {0952-1976},
  doi     = {10.1016/j.engappai.2026.114372},
  url     = {https://www.sciencedirect.com/science/article/pii/S0952197626006536},
}