@InProceedings{singh:sii:2020,
  author    = {Singh, Rohan P. and Kumagai, Iori and Gabas, Antonio and Benallegue, Mehdi and Yoshiyasu, Yusuke and Kanehiro, Fumio},
  title     = {Instance-specific 6-DoF Object Pose Estimation from Minimal Annotations},
  booktitle = {IEEE/SICE International Symposium on System Integration (SII)},
  year      = {2020},
  pages     = {109--114},
  address   = {Honolulu (HI), USA},
  month     = {January 12--15},
  url       = {https://ieeexplore.ieee.org/document/9026239},
  keywords  = {Robotics technology, Human-Robot cooperation/interaction, Software systems},
  doi       = {10.1109/SII46433.2020.9026239},
  abstract  = {In many robotic applications, the environment in which the 6-DoF pose estimation of a known, rigid object and its subsequent grasping are to be performed remains nearly unchanging and might even be known to the robot in advance. In this paper, we refer to this problem as instance-specific pose estimation: the robot is expected to estimate the pose with a high degree of accuracy in only a limited set of familiar scenarios. Minor changes in the scene, including variations in lighting conditions and background appearance, are acceptable, but drastic alterations are not anticipated. To this end, we present a method to rapidly train and deploy a pipeline for estimating the continuous 6-DoF pose of an object from a single RGB image. The key idea is to leverage known camera poses and rigid-body geometry to partially automate the generation of a large labeled dataset. The dataset, along with sufficient domain randomization, is then used to supervise the training of deep neural networks for predicting semantic keypoints. Experimentally, we demonstrate the convenience and effectiveness of our proposed method in accurately estimating object pose while requiring only a very small amount of manual annotation for training.}
}