@InProceedings{agravante:ram:2013,
  author    = {Agravante, Don Joven and Cherubini, Andrea and Kheddar, Abderrahmane},
  title     = {Using vision and haptic sensing for human-humanoid haptic joint actions},
  booktitle = {IEEE International Conference on Robotics, Automation and Mechatronics},
  year      = {2013},
  address   = {Manila, Philippines},
  month     = {November 12--15},
  url       = {https://hal-lirmm.ccsd.cnrs.fr/lirmm-00908439/document},
  keywords  = {Physical Human-Robot Interaction, Human and humanoid skills/cognition/interaction, Human-Robot Collaboration},
  doi       = {10.1109/RAM.2013.6758552},
  abstract  = {Human-humanoid haptic joint actions are collaborative tasks requiring a sustained haptic interaction between both parties. As such, most research in this field has concentrated on using solely the robot's haptic sensing to extract the human partners' intentions. With this information, interaction controllers are designed. In this paper, the addition of visual sensing is investigated and a suitable framework is developed to accomplish this. The framework is then tested on an example of a haptic joint action, namely collaboratively carrying a table. Additionally, a visual task is implemented on top of this. In one case, the aim is to keep the table level, taking gravity into account. In another case, a freely moving ball is balanced to keep it from falling off the table. The experimental results show that the framework is able to properly utilize both information sources to accomplish the task.}
}