@InProceedings{singh:icra:2021,
  author    = {Singh, Rohan Pratap and Benallegue, Mehdi and Yoshiyasu, Yusuke and Kanehiro, Fumio},
  title     = {Rapid Pose Label Generation through Sparse Representation of Unknown Objects},
  booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
  year      = {2021},
  pages     = {10287--10293},
  address   = {Xi'an, China},
  month     = {May 30--June 5},
  url       = {https://arxiv.org/pdf/2011.03790.pdf},
  keywords  = {deep learning, pose estimation, label generation},
  doi       = {10.1109/ICRA48506.2021.9561277},
  abstract  = {Deep Convolutional Neural Networks (CNNs) have been successfully deployed on robots for 6-DoF object pose estimation through visual perception. However, obtaining labeled data at the scale required for the supervised training of CNNs is a difficult task, exacerbated if the object is novel and a 3D model is unavailable. To this end, this work presents an approach for rapidly generating real-world, pose-annotated RGB-D data for unknown objects. Our method not only circumvents the need for a prior 3D object model (textured or otherwise) but also bypasses complicated setups of fiducial markers, turntables, and sensors. With the help of a human user, we first source minimalistic labelings of an ordered set of arbitrarily chosen keypoints over a set of RGB-D videos. Then, by solving an optimization problem, we combine these labels under a world frame to recover a sparse, keypoint-based representation of the object. The sparse representation leads to the development of a dense model and the pose labels for each image frame in the set of scenes. We show that the sparse model can also be efficiently used for scaling to a large number of new scenes. We demonstrate the practicality of the generated labeled dataset by training a pipeline for 6-DoF object pose estimation and a pixel-wise segmentation network.}
}
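
% The sketch below is editorial annotation, not part of the cited work. It is a
% minimal Python illustration of the pipeline the abstract describes: user-labeled
% RGB-D keypoints are back-projected to 3D, fused into a single sparse world-frame
% model, and per-frame 6-DoF pose labels are then derived by rigid alignment. It
% assumes known camera-to-world poses and fuses keypoints by averaging (the
% closed-form least-squares estimate under that assumption), whereas the paper
% solves a joint optimization; all function and variable names are illustrative.
%
% import numpy as np
%
% def backproject(uv, depth, K):
%     """Lift a pixel (u, v) with its depth to a 3D point in the camera frame."""
%     u, v = uv
%     x = (u - K[0, 2]) * depth / K[0, 0]
%     y = (v - K[1, 2]) * depth / K[1, 1]
%     return np.array([x, y, depth])
%
% def fuse_keypoints(observations, cam_poses):
%     """Fuse per-frame camera-frame keypoints into a world-frame sparse model.
%
%     observations: dict frame_id -> {kp_id: 3D point in camera frame}
%     cam_poses:    dict frame_id -> 4x4 camera-to-world transform
%     Returns dict kp_id -> fused 3D point in the world frame.
%     """
%     buckets = {}
%     for fid, kps in observations.items():
%         T = cam_poses[fid]
%         for kp_id, p_cam in kps.items():
%             p_world = T[:3, :3] @ p_cam + T[:3, 3]
%             buckets.setdefault(kp_id, []).append(p_world)
%     # With known camera poses, the mean is the least-squares position estimate.
%     return {k: np.mean(v, axis=0) for k, v in buckets.items()}
%
% def kabsch_pose(model_pts, obs_pts):
%     """Rigid (R, t) mapping model keypoints onto observed ones (Kabsch)."""
%     mu_m, mu_o = model_pts.mean(0), obs_pts.mean(0)
%     H = (model_pts - mu_m).T @ (obs_pts - mu_o)
%     U, _, Vt = np.linalg.svd(H)
%     d = np.sign(np.linalg.det(Vt.T @ U.T))  # guard against reflections
%     R = Vt.T @ np.diag([1.0, 1.0, d]) @ U.T
%     return R, mu_o - R @ mu_m
%
% if __name__ == "__main__":
%     # Noise-free sanity check: recover a synthetic ground-truth pose exactly.
%     rng = np.random.default_rng(0)
%     model = rng.normal(size=(8, 3))
%     R_true = np.linalg.qr(rng.normal(size=(3, 3)))[0]
%     if np.linalg.det(R_true) < 0:
%         R_true[:, 0] *= -1
%     t_true = np.array([0.1, -0.2, 0.5])
%     obs = model @ R_true.T + t_true
%     R, t = kabsch_pose(model, obs)
%     assert np.allclose(R, R_true) and np.allclose(t, t_true)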