FY 2023
[1] Takuya Kiyokawa, Naoki Shirakura, Zhenting Wang, Natsuki Yamanobe, Ixchel G. Ramirez-Alpizar, Weiwei Wan, Kensuke Harada, ‘Difficulty and Complexity Definitions for Assembly Task Allocation and Assignment in Human-Robot Collaboration’, Robotics and Computer-Integrated Manufacturing, vol. 84, 2023.
▶︎[Bib TeX]
[1] Takuya Kiyokawa, Naoki Shirakura, Zhenting Wang, Natsuki Yamanobe, Ixchel G. Ramirez-Alpizar, Weiwei Wan, Kensuke Harada, ‘Difficulty and Complexity Definitions for Assembly Task Allocation and Assignment in Human-Robot Collaboration’. 2023.
@Article{2022011936,
author = {Takuya Kiyokawa, Naoki Shirakura, Zhenting Wang, Natsuki Yamanobe, Ixchel G. Ramirez-Alpizar, Weiwei Wan, Kensuke Harada},
journal = {ROBOTICS AND COMPUTER-INTEGRATED MANUFACTURING},
title = {Difficulty and Complexity Definitions for Assembly Task Allocation and Assignment in Human-Robot Collaboration},
year = {2023},
issn = {0736-5845},
number = {102598},
pages = {1--24},
volume = {84},
abstract = {This paper presents a literature review of the different aspects concerning task allocation and assignment problems for human-robot collaboration in industrial assembly environments. In future advanced industrial environments, robots and humans will share the same workspace and collaborate to efficiently achieve shared goals. The ultimate goal of this study is to clarify the definitions of difficulty and complexity used to encourage effective collaborations between humans and robots to take advantage of the adaptability of humans and the autonomy of robots.},
date = {2023-6},
language = {English},
publisher = {PERGAMON-ELSEVIER SCIENCE LTD},
}
[2] Floris Erich, Bruno Leme, Noriaki Ando, Ryo Hanai, Yukiyasu Domae, ‘Learning Depth Completion of Transparent Objects using Augmented Unpaired Data’, in proceedings of 2023 IEEE International Conference on Robotics and Automation (ICRA), 2023.
▶︎[Bib TeX]
[2] Floris Erich, Bruno Leme, Noriaki Ando, Ryo Hanai, Yukiyasu Domae, ‘Learning Depth Completion of Transparent Objects using Augmented Unpaired Data’. 2023.
@InProceedings{2023001109,
author = {Floris Erich, Bruno Leme, Noriaki Ando, Ryo Hanai, Yukiyasu Domae},
booktitle = {Proceedings of 2023 IEEE International Conference on Robotics and Automation (ICRA)},
title = {Learning Depth Completion of Transparent Objects using Augmented Unpaired Data},
year = {2023},
publisher = {IEEE},
abstract = {We propose a technique for depth completion of transparent objects using augmented data captured directly from real environments with complicated geometry. Using cyclic adversarial learning we train translators to convert between painted versions of the objects and their real transparent counterpart. The translators are trained on unpaired data, hence datasets can be created rapidly and without any manual labelling. Our technique does not make any assumptions about the geometry of the environment, unlike previous SOTA systems that for example assume easily observable occlusion and contact edges. We show how our technique outperforms a SOTA approach, ClearGrasp, that is not trained on environments that have complicated geometry and makes assumptions about the structure of the environment which makes it less applicable for this scenario. We show how the technique can be used to create an object manipulation application with a robot in a dishwasher environment.},
date = {2023-06-04},
language = {English},
}
[3] Simeon Capy, Enrique Coronado, Pablo Osorio, Shohei Hagane, Dominique Deuff, Gentiane Venture, ‘Integration of a Presence Robot in a Smart Home’, in proceedings of 2023 3rd International Conference on Computer, Control and Robotics (ICCCR), 2023.
▶︎[Bib TeX]
[3] Simeon Capy, Enrique Coronado, Pablo Osorio, Shohei Hagane, Dominique Deuff, Gentiane Venture, ‘Integration of a Presence Robot in a Smart Home’. 2023.
@InProceedings{2023001602,
author = {Simeon Capy, Enrique Coronado, Pablo Osorio, Shohei Hagane, Dominique Deuff, Gentiane Venture},
booktitle = {Proceedings of 2023 3rd International Conference on Computer, Control and Robotics (ICCCR)},
title = {Integration of a Presence Robot in a Smart Home},
year = {2023},
pages = {192--197},
publisher = {IEEE},
abstract = {In this work, we introduce a novel solution for configuring a Raspberry Pi (a popular single-board computer) as a central unit for a home robot, leveraging shared data to adapt the robot’s behaviours. Our solution utilises a smartphone application connected via Bluetooth to remotely configure the Raspberry Pi, following state-of-the-art standards for IoT device configuration. To validate the usability of our system, we conducted a user study, which showed that the application is robust enough for deployment and can be easily used by non-technical individuals. In conclusion, our proposed solution offers several benefits over traditional methods, including ease of use and the ability to extend the method to other devices, offering a convenient and accessible way to bring adaptability to the growing market of smart homes through the use of robots.},
date = {2023-08-01},
language = {English},
}
[4] Sari Toyoguchi, Enrique Coronado, Gentiane Venture, ‘A human-centered and adaptive robotic system using deep learning and Adaptive Predictive Controllers’, Journal of Robotics and Mechatronics, vol. 35, no. 3, pp. 834-843, 2023.
▶︎[Bib TeX]
[4] Sari Toyoguchi, Enrique Coronado, Gentiane Venture, ‘A human-centered and adaptive robotic system using deep learning and Adaptive Predictive Controllers’. 2023.
@Article{2023003320,
author = {Sari Toyoguchi, Enrique Coronado, Gentiane Venture},
journal = {Journal of Robotics and Mechatronics},
title = {A human-centered and adaptive robotic system using deep learning and Adaptive Predictive Controllers},
year = {2023},
issn = {0915-3942},
number = {3},
pages = {834--843},
volume = {35},
abstract = {The rise of single-person households coupled with a drop in social interaction due to the COVID-19 pandemic is triggering a loneliness pandemic. This social issue is producing mental health conditions (e.g., depression and stress) not only in the elderly population but also in young adults. In this context, social robots emerge as human-centered robotics technology that can potentially reduce mental health distress produced by social isolation. However, current robotics systems still do not reach a sufficient communication level to produce an effective coexistence with humans. This paper contributes to the ongoing efforts to produce a more seamless Human-Robot Interaction. For this, we present a novel cognitive architecture that uses: (i) deep learning methods for mood recognition from visual and voice modalities, (ii) personality and mood models for adaptation of robot behaviors, and (iii) Adaptive Generalized Predictive Controllers (AGPC) to produce suitable robot reactions. Experimental results indicate that our proposed system influenced people’s moods, potentially reducing stress levels during Human-Robot Interaction},
date = {2023-6},
language = {English},
publisher = {FUJI TECHNOLOGY PRESS LTD},
}
[5] Ryo Hanai, Yukiyasu Domae, Ixchel G. Ramirez-Alpizar, Bruno Leme, Tetsuya Ogata, ‘Force Map: Learning to Predict Contact Force Distribution from Vision’, in proceedings of IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2023.
▶︎[Bib TeX]
[5] Ryo Hanai, Yukiyasu Domae, Ixchel G. Ramirez-Alpizar, Bruno Leme, Tetsuya Ogata, ‘Force Map: Learning to Predict Contact Force Distribution from Vision’. 2023.
@InProceedings{2023003761,
author = {Ryo Hanai, Yukiyasu Domae, Ixchel G. Ramirez-Alpizar, Bruno Leme, Tetsuya Ogata},
booktitle = {Proceedings of IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
title = {Force Map: Learning to Predict Contact Force Distribution from Vision},
year = {2023},
publisher = {IEEE},
abstract = {When humans see a scene, they can roughly imagine the forces applied to objects based on their experience and use them to handle the objects properly. This paper considers transferring this “force-visualization” ability to robots. We hypothesize that a rough force distribution (named “force map”) can be utilized for object manipulation strategies even if accurate force estimation is impossible. Based on this hypothesis, we propose a training method to predict the force map from vision. To investigate this hypothesis, we generated scenes where objects were stacked in bulk through simulation and trained a model to predict the contact force from a single image. We further applied domain randomization to make the trained model function on real images. The experimental results showed that the model trained using only synthetic images could predict approximate patterns representing the contact areas of the objects even for real images. Then, we designed a simple algorithm to plan a lifting direction using the predicted force distribution. We confirmed that using the predicted force distribution contributes to finding natural lifting directions for typical real-world scenes. Furthermore, the evaluation through simulations showed that the disturbance caused to surrounding objects was reduced by 26 % (translation displacement) and by 39 % (angular displacement) for scenes where objects were overlapping.},
date = {2023-10},
language = {English},
}
[6] Enrique Coronado, Toshifumi Shinya, Gentiane Venture, ‘Hold My Hand: Development of a Force Controller and System Architecture for Joint Walking with a Companion Robot’, SENSORS, vol. 23, no. 12, 2023.
▶︎[Bib TeX]
[6] Enrique Coronado, Toshifumi Shinya, Gentiane Venture, ‘Hold My Hand: Development of a Force Controller and System Architecture for Joint Walking with a Companion Robot’. 2023.
@Article{2023005459,
author = {Enrique Coronado, Toshifumi Shinya, Gentiane Venture},
journal = {SENSORS},
title = {Hold My Hand: Development of a Force Controller and System Architecture for Joint Walking with a Companion Robot},
year = {2023},
issn = {1424-8220},
number = {12},
pages = {1--17},
volume = {23},
abstract = {In recent years, there has been a growing interest in the development of robotic systems for improving the quality of life of individuals of all ages. Specifically, humanoid robots offer advantages in terms of friendliness and ease of use in such applications. This article proposes a novel system architecture that enables a commercial humanoid robot, specifically the Pepper robot, to walk side-by-side while holding hands, and communicating by responding to the surrounding environment. To achieve this control, an observer is required to estimate the force applied to the robot. This was accomplished by comparing joint torques calculated from the dynamics model to actual current measurements. Additionally, object recognition was performed using Pepper's camera to facilitate communication in response to surrounding objects. By integrating these components, the system has demonstrated its capability to achieve its intended purpose.},
date = {2023-6},
language = {English},
publisher = {MDPI},
}
[7] Tomohiro Motoda, Damien Petit, Takao Nishi, Kazuyuki Nagata, Weiwei Wan, Kensuke Harada, ‘Multi-step Object Extraction Planning from Clutter based on Support Relations’, IEEE Access, vol. 11, 2023.
▶︎[Bib TeX]
[7] Tomohiro Motoda, Damien Petit, Takao Nishi, Kazuyuki Nagata, Weiwei Wan, Kensuke Harada, ‘Multi-step Object Extraction Planning from Clutter based on Support Relations’. 2023.
@Article{2023006722,
author = {Tomohiro Motoda, Damien Petit, Takao Nishi, Kazuyuki Nagata, Weiwei Wan, Kensuke Harada},
journal = {IEEE Access},
title = {Multi-step Object Extraction Planning from Clutter based on Support Relations},
year = {2023},
issn = {2169-3536},
pages = {45129--45139},
volume = {11},
abstract = {To automate operations in a logistic warehouse, a robot needs to extract items from the clutter on a shelf without collapsing the clutter. To address this problem, this study proposes a multi-step motion planner to stably extract an item by using the support relations of each object included in the clutter. This study primarily focuses on safe extraction, which allows the robot to choose the best next action based on limited observations. By estimating the support relations, we construct a collapse prediction graph to obtain the appropriate order of object extraction. Thus, the target object can be extracted without collapsing the pile. Furthermore, we show that the efficiency of the robot is improved if it uses one of its arms to extract the target object while the other supports a neighboring object. The proposed method is evaluated in real-world experiments on detecting support relations and object extraction tasks. This study makes a significant contribution because the experimental results indicate that the robot can estimate support relations based on collapse predictions and perform safe extraction in real environments. Our multi-step extraction plan ensures both better performance and robustness to achieve safe object extraction tasks from the clutter.},
date = {2023-5},
language = {English},
publisher = {IEEE},
}
[8] Xinyi Zhang, Yukiyasu Domae, Weiwei Wan, Kensuke Harada, ‘Learning efficient policies for picking entangled wire harnesses’, in Proceedings of IEEE International Conference on Robotics and Automation (ICRA), 2023.
▶︎[Bib TeX]
[8] Xinyi Zhang, Yukiyasu Domae, Weiwei Wan, Kensuke Harada, ‘Learning efficient policies for picking entangled wire harnesses’. 2023.
@InProceedings{2023008828,
author = {Xinyi Zhang, Yukiyasu Domae, Weiwei Wan, Kensuke Harada},
booktitle = {Proceedings of IEEE International Conference on Robotics and Automation (ICRA)},
title = {Learning efficient policies for picking entangled wire harnesses},
year = {2023},
publisher = {IEEE},
abstract = {Wire harnesses are essential connecting components in manufacturing industry but are challenging to be automated in industrial tasks such as bin picking. They are long, flexible and tend to get entangled when randomly placed in a bin. This makes it difficult for the robot to grasp a single one in dense clutter. Besides, training or collecting data in simulation is challenging due to the difficulties in modeling the combination of deformable and rigid components for wire harnesses. In this work, instead of directly lifting wire harnesses, we propose to grasp and extract the target following a circle-like trajectory until it is untangled. We learn a policy from real-world data that can infer grasps and separation actions from visual observation. Our policy enables the robot to efficiently pick and separate entangled wire harnesses by maximizing success rates and reducing execution time. To evaluate our policy, we present a set of real-world experiments on picking wire harnesses. Our policy achieves an overall 84.6% success rate compared with 49.2% in baseline. We also evaluate the effectiveness of our policy under different clutter scenarios using unseen types of wire harnesses. Results suggest that our approach is feasible for handling wire harnesses in industrial bin picking.},
date = {2023-6},
language = {English},
}
[9] Shusei Nagato, Tomohiro Motoda, Takao Nishi, Damien Petit, Takuya Kiyokawa, Weiwei Wan, Kensuke Harada, ‘Probabilistic Slide-support Manipulation Planning in Clutter’, in Proceedings of 2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2023.
▶︎[Bib TeX]
[9] Shusei Nagato, Tomohiro Motoda, Takao Nishi, Damien Petit, Takuya Kiyokawa, Weiwei Wan, Kensuke Harada, ‘Probabilistic Slide-support Manipulation Planning in Clutter’. 2023.
@InProceedings{2023010572,
author = {Shusei Nagato, Tomohiro Motoda, Takao Nishi, Damien Petit, Takuya Kiyokawa, Weiwei Wan, Kensuke Harada},
booktitle = {Proceedings of 2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
title = {Probabilistic Slide-support Manipulation Planning in Clutter},
year = {2023},
pages = {1016--1022},
publisher = {IEEE},
abstract = {To safely and efficiently extract an object from the clutter, this paper presents a bimanual manipulation planner in which one hand of the robot is used to slide the target object out of the clutter while the other hand is used to support the surrounding objects to prevent the clutter from collapsing. Our method uses a neural network to predict the physical phenomena of the clutter when the target object is moved. We generate the most efficient action based on the Monte Carlo tree search. The grasping and sliding actions are planned to minimize the number of motion sequences to pick the target object. In addition, the object to be supported is determined to minimize the position change of surrounding objects. Experiments with a real bimanual robot confirmed that the robot could retrieve the target object, reducing the total number of motion sequences and improving safety.},
date = {2023-10},
issn = {2153-0866},
language = {English},
}
[10] Yukiko Osawa, Kei Kase, Yoshiyuki Furukawa, Yukiyasu Domae, ‘Active heat flow sensing for robust material identification’, IEEE Access, vol. 11, 2023.
▶︎[Bib TeX]
[10] Yukiko Osawa, Kei Kase, Yoshiyuki Furukawa, Yukiyasu Domae, ‘Active heat flow sensing for robust material identification’. 2023.
@Article{2023009568,
author = {Yukiko Osawa, Kei Kase, Yoshiyuki Furukawa, Yukiyasu Domae},
journal = {IEEE Access},
title = {Active heat flow sensing for robust material identification},
year = {2023},
issn = {2169-3536},
abstract = {Thermal properties are significant for recognizing an object’s material but cannot be determined via visual and stiffness (or tactile)-based recognition techniques. Most studies have used temperature as a complementary part of multimodal sensing; however, the thermal signal is an unexplored capability that can be beneficial for recognizing target objects. Since changes in thermal responses can result from both material properties and initial temperature, realizing robust and high-accuracy recognition in different environments is a challenging issue. To tackle the issue, this paper proposes a novel strategy for material identification that can actively measure heat flow by heating and cooling a robot gripper, enabling the extraction of the thermal properties of contact materials regardless of the object’s initial temperature variation (referred to as “active heat flow sensing”). We use a robotic task as an example of one possible application of the proposed strategy. For this, we developed a gripper pad embedded in a temperature control system and heat flow sensor to monitor the thermal exchange during contact with a target object. The paper conducted some experiments divided into two scenarios. The first experimental results show that active heat flow sensing is realized within 0.4 sec from first contact for 100 % classification of four heated materials. The second experimental results show that the three materials, whose thermal properties are largely different, can be classified within 0.7 sec from first contact using different initial temperatures of the training and test data. These results suggest robustness against environmental change, which has been difficult using conventional temperature-based methods.},
date = {2023-12-18},
language = {English},
publisher = {IEEE},
}
[11] Yukiko Osawa, Yukiyasu Domae, Ichiro Ogura, Yoshiyuki Furukawa, Abderrahmane Kheddar, ‘Robotic Thermoregulation for Material Identification using Recycled Inner-Generated Motor Heat’, in Proceedings of IEEE International Conference on Robotics and Biomimetics, 2023.
▶︎[Bib TeX]
[11] Yukiko Osawa, Yukiyasu Domae, Ichiro Ogura, Yoshiyuki Furukawa, Abderrahmane Kheddar, ‘Robotic Thermoregulation for Material Identification using Recycled Inner-Generated Motor Heat’. 2023.
@InProceedings{2023011440,
author = {Yukiko Osawa, Yukiyasu Domae, Ichiro Ogura, Yoshiyuki Furukawa, Abderrahmane Kheddar},
booktitle = {Proceedings of IEEE International Conference on Robotics and Biomimetics},
title = {Robotic Thermoregulation for Material Identification using Recycled Inner-Generated Motor Heat},
year = {2023},
pages = {1--7},
publisher = {IEEE},
abstract = {The heat generated inside robots originates mainly from energy loss in actuators, onboard electronic circuits, and computation processing units. Typically, it can be dissipated by forced air (most commonly used in robotics) or fluid convection. In this study, we developed a recycled inner-generated heat system inspired by biological thermoregulation mechanism, utilizing the inner heat to the robot’s thermal perceptions of a finger-pad (by analogy to human fingertip) for object haptic recognition. Among the fingertips’ thermal perceptions, material recognition identifies and distinguishes touched objects, even if the color, stiffness, or roughness are similar. This recognition approach requires a heat source to induce temperature changes at the contact surface to recognize ambient (room) temperature objects. We use an actuator that generates heat in a closed-flow water circuit (by analogy to human cardiovascular system) to induce heat at contact like humans with body temperature and touch. Our thermal method is assessed through experimental simulations of robotic water circulation and a pump system (by analogy to human heart) with the developed finger-pad. The proposed strategy enables it to completely classify three kinds of material covering the same material in 0.7 sec touch.},
date = {2023-12},
language = {English},
}
[12] Enrique Coronado, Natsuki Yamanobe, Gentiane Venture, ‘NEP+: A Human-Centered Framework for Inclusive Human-Machine Interaction Development’, SENSORS, vol. 23, no. 22, 2023.
▶︎[Bib TeX]
[12] Enrique Coronado, Natsuki Yamanobe, Gentiane Venture, ‘NEP+: A Human-Centered Framework for Inclusive Human-Machine Interaction Development’. 2023.
@Article{2023013180,
author = {Enrique Coronado, Natsuki Yamanobe, Gentiane Venture},
journal = {SENSORS},
title = {NEP+: A Human-Centered Framework for Inclusive Human-Machine Interaction Development},
year = {2023},
issn = {1424-8220},
number = {22},
pages = {1--22},
volume = {23},
abstract = {This article presents the Network Empower and Prototyping Platform (NEP+), a flexible framework purposefully crafted to simplify the process of interactive application development, catering to both technical and non-technical users. The name {"}NEP+{"} encapsulates the platform’s dual mission: to empower the network-related capabilities of ZeroMQ and to provide software tools and interfaces for prototyping and integration. NEP+ accomplishes this through a comprehensive quality model and an integrated software ecosystem encompassing middleware, user-friendly graphical interfaces, a command-line tool, and an accessible end-user programming interface. This article primarily focuses on presenting the proposed quality model and software architecture, illustrating how they can empower developers to craft cross-platform, accessible, and user-friendly interfaces for various applications, with a particular emphasis on robotics and the Internet of Things (IoT). Additionally, we provide practical insights into the applicability of NEP+ by briefly presenting real-world user cases where human-centered projects have successfully utilized NEP+ to develop robotics systems.},
date = {2023-11-12},
language = {English},
publisher = {MDPI},
}
[13] Enrique Coronado, Toshio Ueshiba, Ixchel G. Ramirez-Alpizar, ‘A Path to Industry 5.0 Digital Twins for Human-Robot Collaboration by Bridging NEP+ and ROS’, Robotics, vol. 13, no. 28, 2024.
▶︎[Bib TeX]
[13] Enrique Coronado, Toshio Ueshiba, Ixchel G. Ramirez-Alpizar, ‘A Path to Industry 5.0 Digital Twins for Human-Robot Collaboration by Bridging NEP+ and ROS’. 2024.
@Article{2023016893,
author = {Enrique Coronado, Toshio Ueshiba, Ixchel G. Ramirez-Alpizar},
journal = {Robotics},
title = {A Path to Industry 5.0 Digital Twins for Human-Robot Collaboration by Bridging {NEP}+ and {ROS}},
year = {2024},
issn = {2218-6581},
number = {28},
pages = {1--16},
volume = {13},
abstract = {This article expands and explores the capabilities of the NEP+ framework in the context of integrating Digital Twin-Based Human-Robot Collaborative Systems for Industry 5.0 applications. NEP+ serves as a human-centered framework facilitating the connection of components within human-machine system architectures. Our approach pioneers the harmonization of the techno-centered focus and robust ecosystem provided by the Robot Operating System (ROS) in robotics development with the human-centered focus and cross-platform advantages inherent in NEP+ for constructing Industry 5.0 applications. To foster compatibility between these frameworks, we introduce an initial version of the nep2ros ROS package. Our evaluation revolves around assessing the capabilities of the NEP+ Python library by evaluating communication performance utilizing the JavaScript Object Notation (JSON) and MessagePack as serialization formats. Additionally, we compare the communication performance between the nep2ros package and existing solutions, facilitating the bridging of a simulation environment (Unity) and ROS. Our findings exhibit favorable latency values compared to existing and popular solutions. Nonetheless, NEP+ distinguishes itself due to its potential adaptability across diverse development scenarios beyond the confines of robotics.},
date = {2024-02-01},
language = {English},
publisher = {MDPI},
}
[14] 菱川 祐也, 日下 聖, 田中 孝之, 田中 吉史, 白倉 尚樹, 山野辺 夏樹, Ixchel G. Ramirez-Alpizar, Enrique Coronado, 堂前 幸康, ‘ネックバンド型センサユニットを用いた作業負担推定’, 第24回計測自動制御学会 システムインテグレーション部門講演会, 2023.
▶︎[Bib TeX]
[14] 菱川 祐也, 日下 聖, 田中 孝之, 田中 吉史, 白倉 尚樹, 山野辺 夏樹, Ixchel G. Ramirez-Alpizar, Enrique Coronado, 堂前 幸康, ‘ネックバンド型センサユニットを用いた作業負担推定’. 2023.
@misc{
}
[15] 酒井 貴史, 長谷川 浩, 蓮沼 仁志, 長嶋 功一, 原口 林太郎, 坂井 亮, 花井 亮, 菅 佑樹, 北村 篤史, 坂本 武志, 安藤 慶昭, 大原 賢一, ‘人協働のロボットマニピュレータを中心とした 周辺システムとのインターフェースの仕様化 ―人協働のロボットマニピュレータと高度な S/W 技術との連携を目指して―’, ロボティクス・メカトロニクス 講演会 2023, 2023.
▶︎[Bib TeX]
[15] 酒井 貴史, 長谷川 浩, 蓮沼 仁志, 長嶋 功一, 原口 林太郎, 坂井 亮, 花井 亮, 菅 佑樹, 北村 篤史, 坂本 武志, 安藤 慶昭, 大原 賢一, ‘人協働のロボットマニピュレータを中心とした 周辺システムとのインターフェースの仕様化 ―人協働のロボットマニピュレータと高度な S/W 技術との連携を目指して―’. 2023.
@misc{
}
[16] 板寺 駿輝, ‘支援性・操作性・訓練性を両立する物理的インタラクション研究’, ロボティクス・メカトロニクス 講演会 2023, 2023.
▶︎[Bib TeX]
[16] 板寺 駿輝, ‘支援性・操作性・訓練性を両立する物理的インタラクション研究’. 2023.
@misc{
}
[17] 白倉 尚貴, 山野辺 夏樹, 丸山 翼, 堂前 幸康, 尾形 哲也, ‘繰り返し作業における作業テンポの指示と作業負荷・生産性の関係調査’, 第24回計測自動制御学会 システムインテグレーション部門講演会, 2023.
▶︎[Bib TeX]
[17] 白倉 尚貴, 山野辺 夏樹, 丸山 翼, 堂前 幸康, 尾形 哲也, ‘繰り返し作業における作業テンポの指示と作業負荷・生産性の関係調査’. 2023.
@misc{
}
[18] 大田 純志, 水野 海渡, 幸地 真央, RamirezAlpizar Ixchel, 東森充, ‘空圧式剛性可変型ダミー対象物の開発’, 第24回計測自動制御学会 システムインテグレーション部門講演会, 2023.
▶︎[Bib TeX]
[18] 大田 純志, 水野 海渡, 幸地 真央, RamirezAlpizar Ixchel, 東森充, ‘空圧式剛性可変型ダミー対象物の開発’. 2023.
@misc{
}
[19] 日下 聖, 田中 孝之, 菱川 祐也, 田中 吉史, 白倉 尚貴, 山野辺 夏樹, RamirezAlpizar Georgina Ixchel, CORONADO ZUNIGA LUIS ENRIQUE, 堂前 幸康, ‘ネックバンド型センサユニットを用いた作業計測の一提案’, 第29回作業関連性運動器障害研究会, 2023.
▶︎[Bib TeX]
[19] 日下 聖, 田中 孝之, 菱川 祐也, 田中 吉史, 白倉 尚貴, 山野辺 夏樹, RamirezAlpizar Georgina Ixchel, CORONADO ZUNIGA LUIS ENRIQUE, 堂前 幸康, ‘ネックバンド型センサユニットを用いた作業計測の一提案’. 2023.
@misc{
}
[20] 大澤 友紀子, 古川 慈之, 堂前 幸康, ‘熱を感知するロボット手先の材料識別における接触面圧力・時間・温度差の影響’, 第41回日本ロボット学会学術講演会, 2023.
▶︎[Bib TeX]
[20] 大澤 友紀子, 古川 慈之, 堂前 幸康, ‘熱を感知するロボット手先の材料識別における接触面圧力・時間・温度差の影響’. 2023.
@misc{
}
[21] Mustafa Abdullah, 堂前 幸康, RamirezAlpizar Georgina Ixchel, 花井 亮, Floris MarcArden Erich, 尾形 哲也, ‘Force-Map for Robust Feature Representation and Its Application to Object Manipulation’, 第41回日本ロボット学会学術講演会, 2023.
▶︎[Bib TeX]
[21] Mustafa Abdullah, 堂前 幸康, RamirezAlpizar Georgina Ixchel, 花井 亮, Floris MarcArden Erich, 尾形 哲也, ‘Force-Map for Robust Feature Representation and Its Application to Object Manipulation’. 2023.
@misc{
}
[22] Floris Erich, 牧原 昂志, Mustafa Mohamed, Arafa Abdullah, 堂前 幸康, ‘Segment Anything and cyclic adversarial learning for handling pick and place of multiple transparent objects’, 第41回日本ロボット学会学術講演会, 2023.
▶︎[Bib TeX]
[22] Floris Erich, 牧原 昂志, Mustafa Mohamed, Arafa Abdullah, 堂前 幸康, ‘Segment Anything and cyclic adversarial learning for handling pick and place of multiple transparent objects’. 2023.
@misc{
}
[23] Floris Erich, 牧原 昂志, Mustafa Mohamed, Arafa Abdullah, 板寺 駿輝, 堂前 幸康, ‘A survey of spatially embedding language for application in a convenience store’, 第41回日本ロボット学会学術講演会, 2023.
▶︎[Bib TeX]
[23] Floris Erich, 牧原 昂志, Mustafa Mohamed, Arafa Abdullah, 板寺 駿輝, 堂前 幸康, ‘A survey of spatially embedding language for application in a convenience store’. 2023.
@misc{
}
[24] Floris Erich, ‘Automatic segmentation for neural scanning of food products and household objects’, 第41回日本ロボット学会学術講演会, 2023.
▶︎[Bib TeX]
[24] Floris Erich, ‘Automatic segmentation for neural scanning of food products and household objects’. 2023.
@misc{
}
[25] 板寺 駿輝, ‘人機械協調システム構築を支援するオープンパッケージOpenHRCの開発’, 第41回日本ロボット学会学術講演会, 2023.
▶︎[Bib TeX]
[25] 板寺 駿輝, ‘人機械協調システム構築を支援するオープンパッケージOpenHRCの開発’. 2023.
@misc{
}
[26] Floris Erich, 安藤 慶昭, 花井 亮, 堂前 幸康, ‘Learning Depth Completion of Transparent Objects using Augmented Unpaired Data’, Seventh International Workshop on Symbolic-Neural Learning (SNL2023), 2023.
▶︎[Bib TeX]
[26] Floris Erich, 安藤 慶昭, 花井 亮, 堂前 幸康, ‘Learning Depth Completion of Transparent Objects using Augmented Unpaired Data’. 2023.
@misc{
}
[27] Coronado Zuniga Luis Enrique, 山野辺 夏樹, Venture Gentiane, ‘Bridging Humans, Robots, and Computers using NEP+ tools’, Seventh International Workshop on Symbolic-Neural Learning (SNL2023), 2023.
▶︎[Bib TeX]
[27] Coronado Zuniga Luis Enrique, 山野辺 夏樹, Venture Gentiane, ‘Bridging Humans, Robots, and Computers using NEP+ tools’. 2023.
@misc{
}
[28] 山田 響生, 元田 智大, 家脇 康佑, 西 卓郎, 万 偉偉, 原田 研介, ‘取り出し困難な密集状態からの把持戦略の自動生成’, ロボティクス・メカトロニクス 講演会 2023, 2023.
▶︎[Bib TeX]
[28] 山田 響生, 元田 智大, 家脇 康佑, 西 卓郎, 万 偉偉, 原田 研介, ‘取り出し困難な密集状態からの把持戦略の自動生成’. 2023.
@misc{
}
[29] 元田 智大, ダミアン ジェラルド プティ, 西 卓郎, 永田 和之, 万 偉偉, 原田 研介, ‘崩れ予測に基づく多段階ばら積みピッキング手法’, ロボティクス・メカトロニクス 講演会 2023, 2023.
▶︎[Bib TeX]
[29] 元田 智大, ダミアン ジェラルド プティ, 西 卓郎, 永田 和之, 万 偉偉, 原田 研介, ‘崩れ予測に基づく多段階ばら積みピッキング手法’. 2023.
@misc{
}
[30] 大澤 友紀子, 古川 慈之, 堂前 幸康, ‘ロボットの自己発熱を活用した認識・制御技術の検討 ―ロボットの血液循環システム実現に向けて―’, ロボティクス・メカトロニクス 講演会 2023, 2023.
▶︎[Bib TeX]
[30] 大澤 友紀子, 古川 慈之, 堂前 幸康, ‘ロボットの自己発熱を活用した認識・制御技術の検討 ―ロボットの血液循環システム実現に向けて―’. 2023.
@misc{
}
[31] Xinyi Zhang, 堂前 幸康, Weiwei Wan, 原田 研介, ‘Initial Experiments on Picking Entangled Wire Harnesses using Dynamic Manipulation’, ロボティクス・メカトロニクス 講演会 2023, 2023.
▶︎[Bib TeX]
[31] Xinyi Zhang, 堂前 幸康, Weiwei Wan, 原田 研介, ‘Initial Experiments on Picking Entangled Wire Harnesses using Dynamic Manipulation’. 2023.
@misc{
}
[32] 板寺 駿輝, 堂前 幸康, ‘生産性に基づいた自律・遠隔ロボット衝突回避時の優先度選択法の提案’, ロボティクス・メカトロニクス 講演会 2023, 2023.
▶︎[Bib TeX]
[32] 板寺 駿輝, 堂前 幸康, ‘生産性に基づいた自律・遠隔ロボット衝突回避時の優先度選択法の提案’. 2023.
@misc{
}
[33] 牧原 昂志, 山田 亮佑, 堂前 幸康, 片岡 裕雄, 原田 研介, ‘数式ドリブン教師あり学習を用いた把持位置検出’, MIRU2023, 2023.
▶︎[Bib TeX]
[33] 牧原 昂志, 山田 亮佑, 堂前 幸康, 片岡 裕雄, 原田 研介, ‘数式ドリブン教師あり学習を用いた把持位置検出’. 2023.
@misc{
}
[34] Xinyi Zhang, Yukiyasu Domae, Weiwei Wan, Kensuke Harada, ‘A closed-loop bin picking system for entangled wire harnesses using bimanual and dynamic manipulation’, Robotics and Computer-Integrated Manufacturing, vol. 86, issue C, 2024.
▶︎[Bib TeX]
[34] Xinyi Zhang, Yukiyasu Domae, Weiwei Wan, Kensuke Harada, ‘A closed-loop bin picking system for entangled wire harnesses using bimanual and dynamic manipulation’. 2024.
@article{20241016,
author = {Zhang, Xinyi and Domae, Yukiyasu and Wan, Weiwei and Harada, Kensuke},
title = {A closed-loop bin picking system for entangled wire harnesses using bimanual and dynamic manipulation},
year = {2024},
issue_date = {Apr 2024},
publisher = {Pergamon Press, Inc.},
address = {USA},
volume = {86},
number = {C},
issn = {0736-5845},
url = {https://doi.org/10.1016/j.rcim.2023.102670},
doi = {10.1016/j.rcim.2023.102670},
journal = {Robotics and Computer-Integrated Manufacturing},
month = {feb},
numpages = {9},
keywords = {Robotic bin picking, Dynamic manipulation, Bimanual manipulation, Perception for grasping and manipulation, Entanglement, Separation}
}
[35] Xinyi Zhang, Yukiyasu Domae, Weiwei Wan, Kensuke Harada, ‘Learning to dexterously pick or separate tangled-prone objects for industrial bin picking’, IEEE Robotics and Automation Letters, vol. 8, no. 8, 2023.
▶︎[Bib TeX]
[35] Xinyi Zhang, Yukiyasu Domae, Weiwei Wan, Kensuke Harada, ‘Learning to dexterously pick or separate tangled-prone objects for industrial bin picking’. 2023.
@ARTICLE{10168919,
author={Zhang, Xinyi and Domae, Yukiyasu and Wan, Weiwei and Harada, Kensuke},
journal={IEEE Robotics and Automation Letters},
title={Learning to Dexterously Pick or Separate Tangled-Prone Objects for Industrial Bin Picking},
year={2023},
volume={8},
number={8},
pages={4919-4926},
keywords={Robots;Clutter;Visualization;Grasping;Affordances;Grippers;Task analysis;Deep learning in grasping and manipulation;grasping},
doi={10.1109/LRA.2023.3291271}}
[36] 堂前 幸康, 丸山 翼, 植芝 俊夫, 多田 充徳, ‘デジタルツインコンピューティング: 2. 生産性と人負担軽減を両立するデジタルツイン’, 情報処理, vol. 64, no. 11, 2023.
▶︎[Bib TeX]
[36] 堂前 幸康, 丸山 翼, 植芝 俊夫, 多田 充徳, ‘デジタルツインコンピューティング: 2. 生産性と人負担軽減を両立するデジタルツイン’. 2023.
@article{1390016339425567872,
author="堂前, 幸康 and 丸山, 翼 and 植芝, 俊夫 and 多田, 充徳",
title="デジタルツインコンピューティング:2.生産性と人負担軽減を両立するデジタルツイン",
journal="情報処理",
publisher="情報処理学会",
year="2023",
month="10",
volume="64",
number="11",
pages="e7-e13",
DOI="10.20729/00228357",
URL="https://cir.nii.ac.jp/crid/1390016339425567872"
}
[37] 堂前 幸康, ‘経験拡張:ロボット学習における視覚的仮想経験の生成と応用’, 画像センシングシンポジウム(SSII) 2023, 2023.
▶︎[Bib TeX]
[37] 堂前 幸康, ‘経験拡張:ロボット学習における視覚的仮想経験の生成と応用’. 2023.
@misc{}
[38] Floris Erich, Noriaki Ando, Yusuke Yoshiyasu, ‘Scanning and Affordance Segmentation of Glass and Plastic Bottles’, in proceedings of 2024 IEEE/SICE International Symposium on System Integration (SII), 2024.
▶︎[Bib TeX]
[38] Floris Erich, Noriaki Ando, Yusuke Yoshiyasu, ‘Scanning and Affordance Segmentation of Glass and Plastic Bottles’. 2024.
@INPROCEEDINGS{10417248,
author={Erich, Floris and Ando, Noriaki and Yoshiyasu, Yusuke},
booktitle={2024 IEEE/SICE International Symposium on System Integration (SII)},
title={Scanning and Affordance Segmentation of Glass and Plastic Bottles},
year={2024},
volume={},
number={},
pages={514-519},
keywords={Three-dimensional displays;Training data;Glass;Grasping;Plastics;Plastic products;Task analysis},
doi={10.1109/SII58957.2024.10417248}}
[39] Naoki Shirakura, Natsuki Yamanobe, Tsubasa Maruyama, Yukiyasu Domae, Tetsuya Ogata, ‘Work Tempo Instruction Framework for Balancing Human Workload and Productivity in Repetitive Task’, in Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction, 2024.
▶︎[Bib TeX]
[39] Naoki Shirakura, Natsuki Yamanobe, Tsubasa Maruyama, Yukiyasu Domae, Tetsuya Ogata, ‘Work Tempo Instruction Framework for Balancing Human Workload and Productivity in Repetitive Task’. 2024.
@inproceedings{10.1145/3610978.3640756,
author = {Shirakura, Naoki and Yamanobe, Natsuki and Maruyama, Tsubasa and Domae, Yukiyasu and Ogata, Tetsuya},
title = {Work Tempo Instruction Framework for Balancing Human Workload and Productivity in Repetitive Task},
year = {2024},
isbn = {9798400703232},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3610978.3640756},
doi = {10.1145/3610978.3640756},
abstract = {This paper proposes a feedback framework that adjusts human workload and productivity by instructing work tempo and personalizes work according to individual differences. For feedback of optimal work tempo, this study proposes a human worker state transition model to investigate the effects of work tempo instructions on workload and productivity. Based on the results obtained, we proposed a feedback policy using our proposed worker state transition model. By testing our proposed framework in a picking task, we showed the possibility to balance productivity and workload.},
booktitle = {Companion of the 2024 ACM/IEEE International Conference on Human-Robot Interaction},
pages = {980–984},
numpages = {5},
keywords = {HCI, human-centered system, manufacturing, workload},
location = {Boulder, CO, USA},
series = {HRI '24}
}
[40] Zhenting Wang, Takuya Kiyokawa, Issei Sera, Natsuki Yamanobe, Weiwei Wan, Kensuke Harada, ‘Error Correction in Robotic Assembly Planning From Graphical Instruction Manuals’, IEEE Access, vol. 11, 2023.
▶︎[Bib TeX]
[40] Zhenting Wang, Takuya Kiyokawa, Issei Sera, Natsuki Yamanobe, Weiwei Wan, Kensuke Harada, ‘Error Correction in Robotic Assembly Planning From Graphical Instruction Manuals’. 2023.
@ARTICLE{10265248,
author={Wang, Zhenting and Kiyokawa, Takuya and Sera, Issei and Yamanobe, Natsuki and Wan, Weiwei and Harada, Kensuke},
journal={IEEE Access},
title={Error Correction in Robotic Assembly Planning From Graphical Instruction Manuals},
year={2023},
volume={11},
number={},
pages={107276-107286},
keywords={Task analysis;Manuals;Robots;Error correction;Planning;Symbols;Robotic assembly;Planning;Error correction;instruction manual;robotic assembly;task planning},
doi={10.1109/ACCESS.2023.3319822}}
[41] 堂前 幸康, ‘ロボットチャレンジと産業の接点を探る’, RRI・マニピュレーション委員会第1回シンポジウム, 2024.
▶︎[Bib TeX]
[41] 堂前 幸康, ‘ロボットチャレンジと産業の接点を探る’. 2024.
@misc{}
FY 2022
[1] 白澤健, 後根充志, 高瀬竜一, 堂前幸康, ‘袋物柔軟重量物に適した自重式鉗子型グリッパの開発’. 2022.
▶︎[Bib TeX]
[1] 白澤健, 後根充志, 高瀬竜一, 堂前幸康, ‘袋物柔軟重量物に適した自重式鉗子型グリッパの開発’. 2022.
@misc{2022001642,
abstract = {In this study, we propose a self-weight-driven forceps-type gripper suited to heavy, flexible bagged objects such as garbage bags. A mechanism that exploits the object's own weight for grasping, combined with forceps-shaped gripping surfaces, achieves excellent grasping performance for such objects. We also formulate the grasping force and verify its effectiveness experimentally. The contributions of this paper are achieving stable grasping of heavy, flexible bagged objects with a simple mechanism, eliminating the need for complex force control, and reducing the power consumption required for grasping by utilizing the object's own weight.},
author = {{白澤健, 後根充志, 高瀬竜一, 堂前幸康}},
date = {2022-06-03},
howpublished = {ロボティクス・メカトロニクス講演会 2022},
language = {Japanese},
title = {袋物柔軟重量物に適した自重式鉗子型グリッパの開発},
year = {2022}
}
[2] 堂前幸康, ‘次世代生産を支えるサイバーフィジカルシステム’. 2022.
▶︎[Bib TeX]
[2] 堂前幸康, ‘次世代生産を支えるサイバーフィジカルシステム’. 2022.
@misc{2022005125,
abstract = {This talk introduces robotics research results being advanced at the Cyber-Physical Systems Research Facility.},
author = {{堂前幸康}},
date = {2022-05-09},
howpublished = {日本ロボット学会北海道ロボット技術研究専門委員会特別講演会},
language = {Japanese},
title = {次世代生産を支えるサイバーフィジカルシステム},
year = {2022}
}
[3] Vitor Isume, 原田 研介, Weiwei Wan, 堂前幸康, ‘RGB Image-based Craft Assembly System’. 2022.
▶︎[Bib TeX]
[3] Vitor Isume, 原田 研介, Weiwei Wan, 堂前幸康, ‘RGB Image-based Craft Assembly System’. 2022.
@misc{2022008047,
abstract = {The craft assembly task, here, is defined as a type of assembly task, with some unique
characteristics. Given a goal object, the task is to identify the parts and assemble a craft that most
closely resembles that object. However, the unique challenge in a craft assembly is that the available
materials are everyday items such as small boxes, bottle caps and markers. In this case, the available
materials don’t exactly match the parts of the goal object, and a level of abstraction is required to
find the equivalence between the parts of the goal object and the available materials. In our previous
work, we used the geometry and affordance of the available materials as the criteria to select each part
of the goal object. After selecting the materials, the system then needs to plan the assembly sequence
and the grasp.},
author = {{Vitor Isume, 原田 研介, Weiwei Wan, 堂前幸康}},
date = {2022-09-05},
howpublished = {日本ロボット学会学術講演会},
language = {Japanese},
title = {{RGB} Image-based Craft Assembly System},
year = {2022}
}
[4] Xinyi Zhang, 堂前 幸康, 原田 研介, Weiwei Wan, ‘Learning Dexterous Bin Picking Policies for Picking and
Separating Tangled-Prone Parts’. 2022.
▶︎[Bib TeX]
[4] Xinyi Zhang, 堂前 幸康, 原田 研介, Weiwei Wan, ‘Learning Dexterous Bin Picking Policies for Picking and
Separating Tangled-Prone Parts’. 2022.
@misc{2022008048,
abstract = {In this work, we proposed a learning-based approach to overcome the challenges thanks to the
following three key ingredients. (1) We learn a network to predict an affordance map that can classify
picking or separation actions and the grasp points at the same time. (2) We learn an action-conditioned
network to plan separation motions for entangled objects. The network uses visual input to directly
detect grasp points and directions for pulling the target object from the entanglement. (3) We propose
a transition area to reduce the degrees of entanglement. Instead of directly separating the entangled
parts in the dense clutters, the robot first picks up the entangled objects and drops them into the
transition zone. If they are disentangled, the robot transports them to the destination. Otherwise, the
robot can perform separation actions in low degrees of entanglement, enlarging the separation
efficiency},
author = {{Xinyi Zhang, 堂前 幸康, 原田 研介, Weiwei Wan}},
date = {2022-09-06},
howpublished = {日本ロボット学会学術講演会},
language = {Japanese},
title = {Learning Dexterous Bin Picking Policies for Picking and Separating Tangled-Prone Parts},
year = {2022}
}
[5] Leme Bruno, RamirezAlpizar Ixchel, Floris MarcArden Erich, 花井 亮, 堂前 幸康, 尾形 哲也, ‘Force map: an approach
on how to learn to manipulate deformable objects’. 2022.
▶︎[Bib TeX]
[5] Leme Bruno, RamirezAlpizar Ixchel, Floris MarcArden Erich, 花井 亮, 堂前 幸康, 尾形 哲也, ‘Force map: an approach
on how to learn to manipulate deformable objects’. 2022.
@misc{2022011351,
abstract = {In this paper we propose a visual representation of the contact force between the robot’s
hand and the manipulated object which we call “forceMap”. A VR system and a physics simulator are used
to generate the proposed force map.},
author = {{Leme Bruno, RamirezAlpizar Ixchel, Floris MarcArden Erich, 花井 亮, 堂前 幸康, 尾形 哲也}},
date = {2022-12-16},
howpublished = {第23回計測自動制御学会システムインテグレーション部門講演会(SI2022)},
language = {Japanese},
title = {Force map: an approach on how to learn to manipulate deformable objects},
year = {2022}
}
[6] Cristian Beltran, Damien Petit, RamirezAlpizar Georgina Ixchel, 原田 研介, ‘Curriculum Reinforcement
Learning for Industrial Insertion Tasks’. 2022.
▶︎[Bib TeX]
[6] Cristian Beltran, Damien Petit, RamirezAlpizar Georgina Ixchel, 原田 研介, ‘Curriculum Reinforcement
Learning for Industrial Insertion Tasks’. 2022.
@misc{2022011932,
abstract = {This paper presents a method that combines CL with DR to improve learning efficiency and
generalization capabilities of RL control policies. The proposed method has been extensively evaluated
on a real robot on industrial insertion tasks with tight tolerances ±0.01 mm.},
author = {{Cristian Beltran, Damien Petit, RamirezAlpizar Georgina Ixchel, 原田 研介}},
date = {2022-09-06},
howpublished = {第40回日本ロボット学会学術講演会},
language = {Japanese},
title = {Curriculum Reinforcement Learning for Industrial Insertion Tasks},
year = {2022}
}
[7] Wang Zhenting, 山野辺 夏樹, 清川拓哉, RamirezAlpizar Ixchel, 世良一成, Wan Weiwei, 原田 研介, ‘Robot Assembly Planning
using Symbol and Speech Bubble Information on Graphical Instruction Manuals’. 2022.
▶︎[Bib TeX]
[7] Wang Zhenting, 山野辺 夏樹, 清川拓哉, RamirezAlpizar Ixchel, 世良一成, Wan Weiwei, 原田 研介, ‘Robot Assembly Planning
using Symbol and Speech Bubble Information on Graphical Instruction Manuals’. 2022.
@misc{2022011935,
abstract = {This paper aims to accomplish the robot assembly task by automatically understanding the
graphical instruction manual. Component, symbol, speech bubble and model number information are
extracted from the instruction images. The Assembly Task Sequences Graph (ATSG) proposed in our previous
research is extended in this paper by considering these kinds of information obtained from the
instruction images to increase the precision of the generated ATSG. The effectiveness of the proposed
method is verified by comparing the ATSG generated from four different error correction methods of five
different chairs. The result shows that the proposed method decreases the influence of wrong detection
results to generate an ATSG in a higher accuracy.},
author = {{Wang Zhenting, 山野辺 夏樹, 清川拓哉, RamirezAlpizar Ixchel, 世良一成, Wan Weiwei, 原田 研介}},
date = {2022-06-03},
howpublished = {ロボティクス・メカトロニクス 講演会 2022},
language = {Japanese},
title = {Robot Assembly Planning using Symbol and Speech Bubble Information on Graphical Instruction
Manuals},
year = {2022}
}
[8] 永田 和之, ‘AI×ロボットを活用した小規模店舗作業の自動化の試み’. 2022.
▶︎[Bib TeX]
[8] 永田 和之, ‘AI×ロボットを活用した小規模店舗作業の自動化の試み’. 2022.
@misc{2022012802,
abstract = {An introduction to AIST's efforts to automate tasks in small retail stores.},
author = {{永田 和之}},
date = {2022-12-08},
howpublished = {石川県次世代産業育成講座・新技術セミナー「ロボットによる自動化の最新事例」},
language = {Japanese},
title = {{AI×ロボットを活用した小規模店舗作業の自動化の試み}},
year = {2022}
}
[9] 花井 亮, 堂前 幸康, 尾形 哲也, ‘深層予測学習モデルを用いた矩形注視領域推定’. 2022.
▶︎[Bib TeX]
[9] 花井 亮, 堂前 幸康, 尾形 哲也, ‘深層予測学習モデルを用いた矩形注視領域推定’. 2022.
@misc{2022013112,
abstract = {In motion learning, attention is helpful in improving the robustness and explainability of
the learned model. This paper estimates a rectangular region of interest (ROI) using a deep predictive
learning model that learns from human demonstration data. The proposed method introduces an attention
map that depends on the task context and fits a rectangular region to the attention map. We conducted
experiments on three tasks: reaching, pen-kitting, and liquid-pouring. It was confirmed that reasonable
ROIs were estimated. We also discuss some results that are different from our expectations.},
author = {{花井 亮, 堂前 幸康, 尾形 哲也}},
date = {2022-12-16},
howpublished = {第23回計測自動制御学会システムインテグレーション部門講演会(SI2022)},
language = {Japanese},
title = {深層予測学習モデルを用いた矩形注視領域推定},
year = {2022}
}
[10] 堂前 幸康, ‘サイバーフィジカルシステムの構築手法と作業支援への応用’. 2022.
▶︎[Bib TeX]
[10] 堂前 幸康, ‘サイバーフィジカルシステムの構築手法と作業支援への応用’. 2022.
@misc{2022013247,
abstract = {This talk presents digital twin technologies for human-machine collaboration studied at the AIST CPS research building and their application areas.},
author = {{堂前 幸康}},
date = {2022-12-03},
howpublished = {軽労化研究会 第51回定例会},
language = {Japanese},
title = {サイバーフィジカルシステムの構築手法と作業支援への応用},
year = {2022}
}
[11] E. Coronado, ‘Building Digital Twins for Human-Robot Interaction using games engines, a human-centered
perspective’. 2023.
▶︎[Bib TeX]
[11] E. Coronado, ‘Building Digital Twins for Human-Robot Interaction using games engines, a
human-centered perspective’. 2023.
@misc{2023001596,
abstract = {Traditionally, technology has been developed with the focus on making machines more
efficient and independent. However, there is a growing interest in approaches that prioritize human
needs and well-being, such as Industry 5.0 and Society 5.0. These approaches emphasize the importance of
considering factors like the democratization of technology, ergonomics, user experience, and overall
benefit to humans. This talk starts with a short overview of the different perspectives that focus on
the human aspect of technology. Then, I will review the technologies commonly used to create digital
twins. Finally, I will present a software architecture that can help to make Human-Robot Interaction
applications more user-friendly and accessible for a wide range of users.},
author = {Enrique Coronado},
date = {2023-02-02},
howpublished = {AIST Artificial Intelligence Research Center International Symposium},
title = {Building Digital Twins for Human-Robot Interaction using games engines, a human-centered
perspective},
year = {2023}
}
[12] K. Makihara, Y. Domae, I. G. Ramirez-Alpizar, T. Ueshiba, and K. Harada, ‘Grasp pose detection for
deformable daily items by pix2stiffness estimation’, Advanced Robotics, vol. 36, no. 12, pp. 600–610, Jun.
2022.
▶︎[Bib TeX]
[12] K. Makihara, Y. Domae, I. G. Ramirez-Alpizar, T. Ueshiba, and K. Harada, ‘Grasp pose detection for
deformable daily items by pix2stiffness estimation’, Advanced Robotics, vol. 36, no. 12, pp. 600–610, Jun.
2022.
@article{Makihara_2022,
abstract = {While most of the existing works on grasp pose detection have assumed a complete 3D object model, this paper proposes a grasp pose detection method for unknown deformable objects, based on visual information. The proposed method comprises two parts; (1) pix2stiffness estimation, which generates a stiffness map that indicates the object’s stiffness for each pixel in an image using generative adversarial networks (GAN), and (2) grasp pose detection, which adapts a stiffness map to maximally reduce the object’s deformation and avoid any possible damage. We demonstrate the validity of the proposed method and evaluate the estimation accuracy via simulations, and in a real environment. We also verify that the proposed approach can plan how to grasp an object using few 3D models of objects.},
author = {Koshi Makihara and Yukiyasu Domae and Ixchel G. Ramirez-Alpizar and Toshio Ueshiba and Kensuke
Harada},
date = {2022-6},
doi = {10.1080/01691864.2022.2078669},
issn = {0169-1864},
journal = {Advanced Robotics},
language = {English},
month = {jun},
number = {12},
pages = {600--610},
publisher = {Informa {UK} Limited},
title = {Grasp pose detection for deformable daily items by pix2stiffness estimation},
volume = {36},
year = {2022}
}
[13] E. Coronado, T. Kiyokawa, G. A. G. Ricardez, I. G. Ramirez-Alpizar, G. Venture, and N. Yamanobe,
‘Evaluating quality in human-robot interaction: A systematic search and classification of performance and
human-centered factors, measures and metrics towards an industry 5.0’, Journal of Manufacturing Systems,
vol. 63, pp. 392–410, Apr. 2022.
▶︎[Bib TeX]
[13] E. Coronado, T. Kiyokawa, G. A. G. Ricardez, I. G. Ramirez-Alpizar, G. Venture, and N. Yamanobe,
‘Evaluating quality in human-robot interaction: A systematic search and classification of performance and
human-centered factors, measures and metrics towards an industry 5.0’, Journal of Manufacturing Systems,
vol. 63, pp. 392–410, Apr. 2022.
@article{Coronado_2022,
abstract = {Industry 5.0 constitutes a change of paradigm where the increase of economic benefits caused
by a never-ending increment of production is no longer the only priority. Instead, Industry 5.0
addresses social and planetary challenges caused or neglected in Industry 4.0 and below. One of the most relevant challenges of Industry 5.0 is the design of human-centered smart environments (i.e.,
that prioritize human well-being while maintaining production performance). In these environments,
robots and humans will share the same space and collaborate to reach common objectives. This article
presents a literature review of the different aspects concerning the problem of quality measurement in
Human-Robot Interaction (HRI) applications for manufacturing environments. To help practitioners and new
researchers in the area, this article presents an overview of factors, metrics, and measures used in the
robotics community to evaluate performance and human well-being quality aspects in HRI applications. For
this, we performed a systematic search in relevant databases for robotics (Science Direct, IEEE Xplore,
ACM digital library, and Springer Link). We summarize and classify results extracted from 102
peer-reviewed research articles published until March 2022 in two definition models: 1) a taxonomy of
performance aspects and 2) a Venn Diagram of common human factors in HRI. Additionally, we briefly
explain the differences between often confusing or overlapped concepts in the area. We also introduce
common human factors evaluated by the robotics community and identify seven emergent research topics
which can have a relevant impact on Industry 5.0.},
author = {Enrique Coronado and Takuya Kiyokawa and Gustavo A. Garcia Ricardez and Ixchel G.
Ramirez-Alpizar and Gentiane Venture and Natsuki Yamanobe},
date = {2022-4},
doi = {10.1016/j.jmsy.2022.04.007},
issn = {0278-6125},
journal = {Journal of Manufacturing Systems},
language = {English},
month = {apr},
pages = {392--410},
publisher = {Elsevier {BV}},
title = {Evaluating quality in human-robot interaction: A systematic search and classification of
performance and human-centered factors, measures and metrics towards an industry 5.0},
volume = {63},
year = {2022}
}
[14] J. Xu, Y. Domae, T. Ueshiba, W. Wan, and K. Harada, ‘Planning a Minimum Sequence of Positions for
Picking Parts From Multiple Trays Using a Mobile Manipulator’, IEEE Access, vol. 9, pp. 165526–165541, 2021.
▶︎[Bib TeX]
[14] J. Xu, Y. Domae, T. Ueshiba, W. Wan, and K. Harada, ‘Planning a Minimum Sequence of Positions for
Picking Parts From Multiple Trays Using a Mobile Manipulator’, IEEE Access, vol. 9, pp. 165526–165541,
2021.
@article{Xu_2021,
abstract = {Mobile manipulators are able to operate in a large workspace, and have the potential to
replace human workers to perform a sequence of pick-and-place tasks at separate locations. Many existing
works optimize the base position or manipulator configuration for a single manipulation task, however,
very few of them consider a sequence of tasks. In this paper, we present a planner that plans a minimum
sequence of base positions for a mobile manipulator to robustly collect objects stored in multiple
trays. We use inverse kinematics to determine the base region where a mobile manipulator can grasp the
target objects stored in a tray, and move the mobile manipulator to the intersections of base regions to
reduce the operation time for moving the base. We ensure robustness by only considering the intersection
whose radius of the inscribed circle is larger than the base positioning error. Then the minimization of
the number of base positions is formulated as a 0–1 knapsack problem. Besides, considering different
object placements in the tray, we analyze feasible policies for dynamically updating the base sequence
based on either the remaining objects or the target objects to be picked. In the experiment, we examine
our planner on various scenarios, including different object placements: (1) Regularly placed toy
objects; (2) Randomly placed industrial parts; and different implementation policies: (1) Apply globally
static base positions; (2) Dynamically update the base positions. The experiment results show that the
time for moving the base decreases by 11.22 seconds (29.37%) to 17.26 seconds (36.77%) by reducing one
base movement, and demonstrate the feasibility and potential of the proposed method.},
author = {Jingren Xu and Yukiyasu Domae and Toshio Ueshiba and Weiwei Wan and Kensuke Harada},
date = {2022-2},
doi = {10.1109/access.2021.3135374},
issn = {2169-3536},
journal = {{IEEE} Access},
language = {English},
pages = {165526--165541},
publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
title = {Planning a Minimum Sequence of Positions for Picking Parts From Multiple Trays Using a Mobile
Manipulator},
volume = {9},
year = {2021}
}
[15] K. Kase, C. Utsumi, Y. Domae, and T. Ogata, ‘Use of Action Label in Deep Predictive Learning for Robot
Manipulation’, in 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2022.
▶︎[Bib TeX]
[15] K. Kase, C. Utsumi, Y. Domae, and T. Ogata, ‘Use of Action Label in Deep Predictive Learning for
Robot Manipulation’, in 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS),
2022.
@inproceedings{Kase_2022,
abstract = {Various forms of human knowledge can be explicitly used to enhance deep robot learning from
demonstrations. Annotation of subtasks from task segmentation is one type of human symbolism and
knowledge. Annotated subtasks can be referred to as action labels, which are more primitive symbols that
can be building blocks for more complex human reasoning, like language instructions. However, action
labels are not widely used to boost learning processes because of problems that include (1) real-time
annotation for online manipulation, (2) temporal inconsistency by annotators, (3) difference in data
characteristics of motor commands and action labels, and (4) annotation cost. To address these problems,
we propose the Gated Action Motor Predictive Learning (GAMPL) framework to leverage action labels for
improved performance. GAMPL has two modules to obtain soft action labels compatible with motor commands
and to generate motion. In this study, GAMPL is evaluated for towel-folding manipulation tasks in a real
environment with a six degrees-of-freedom (6 DoF) robot and shows improved generalizability with action
labels.},
author = {Kei Kase and Chikara Utsumi and Yukiyasu Domae and Tetsuya Ogata},
booktitle = {2022 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})},
date = {2022-0},
doi = {10.1109/iros47612.2022.9982091},
publisher = {IEEE},
title = {Use of Action Label in Deep Predictive Learning for Robot Manipulation},
year = {2022}
}
[16] K. Takata, T. Kiyokawa, I. G. Ramirez-Alpizar, N. Yamanobe, W. Wan, and K. Harada, ‘Efficient
Task/Motion Planning for a Dual-arm Robot from Language Instructions and Cooking Images’, in 2022 IEEE/RSJ
International Conference on Intelligent Robots and Systems (IROS), 2022, pp. 12058–12065.
▶︎[Bib TeX]
[16] K. Takata, T. Kiyokawa, I. G. Ramirez-Alpizar, N. Yamanobe, W. Wan, and K. Harada, ‘Efficient
Task/Motion Planning for a Dual-arm Robot from Language Instructions and Cooking Images’, in 2022 IEEE/RSJ
International Conference on Intelligent Robots and Systems (IROS), 2022, pp. 12058–12065.
@inproceedings{Takata_2022,
abstract = {When generating robot motions based on instructions such as cooking recipes, ambiguity of
the instructions and lack of necessary information are problematic for the robot. To solve this problem,
we propose an efficient motion planning approach for a dual-arm robot by constructing a graph network
representing a motion sequence based on a recipe consisting of verbal instructions and cooking images. A
functional unit is generated based on the linguistic instructions in the recipe. Since most recipes lack
the necessary information for executing the motion, we first consider extracting the information about
the cooking motion like cutting from the food images of the recipe and supplementing it. In addition, to
supplement the actions that humans perform unconsciously, we generate functional units for actions not
explicitly mentioned in the recipe based on the current situation of the cooking process, and then
connect them to the functional units generated from the recipe. Moreover, during the connection we
consider the motion of the robot's arms in parallel for an efficient execution of the recipe, similar to
those of a human. Through experiments, we demonstrate that for a given recipe, the proposed method can
be used to generate a cooking sequence with the supplementary information needed, and executed by a
dual-arm robot. The results show that the proposed method is effective and can simplify robot teaching
in cooking tasks.},
author = {Kota Takata and Takuya Kiyokawa and Ixchel G. Ramirez-Alpizar and Natsuki Yamanobe and Weiwei
Wan and Kensuke Harada},
booktitle = {2022 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})},
date = {2022-10},
doi = {10.1109/iros47612.2022.9981280},
issn = {2153-0866},
pages = {12058--12065},
publisher = {IEEE},
title = {Efficient Task/Motion Planning for a Dual-arm Robot from Language Instructions and Cooking
Images},
year = {2022}
}
[17] K. Kase, A. Tateishi, and T. Ogata, ‘Robot Task Learning With Motor Babbling Using Pseudo Rehearsal’,
in IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2022, vol. 7, pp. 8377–8382.
@inproceedings{Kase_2022,
abstract = {The paradigm of deep robot learning from demonstrations allows robots to solve complex
manipulation tasks by capturing motor skills from given demonstrations; however, collecting
demonstrations can be costly. As an alternative, robots can acquire embodiment and motor skills by
randomly moving their bodies, which is referred to as motor babbling. Motor babbling data provide
relatively inexpensive demonstrations and can be used to enhance the generalizability of robot motions,
but they are often used for pre-training or joint training with target task demonstrations. This study
focused on the concept of continual learning via pseudo-rehearsal and retaining the embodiment
information acquired from motor babbling data for effective task learning. Pseudo-rehearsal has
beneficial features that allow robot models to be retrained and distributed without access to the motor
babbling dataset. In this paper, we propose a pseudo-rehearsal framework that can be jointly trained
with task trajectories and rehearsed motor babbling trajectories. Using our proposed method, robots can
retain motor skills from motor babbling and exhibit improved performance in task execution.},
author = {Kei Kase and Ai Tateishi and Tetsuya Ogata},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems {(IROS)}},
date = {2022-7},
doi = {10.1109/lra.2022.3187517},
journal = {{IEEE} Robotics and Automation Letters},
language = {English},
month = {jul},
number = {3},
pages = {8377--8382},
publisher = {IEEE},
title = {Robot Task Learning With Motor Babbling Using Pseudo Rehearsal},
volume = {7},
year = {2022}
}
[18] N. Shirakura, R. Takase, N. Yamanobe, Y. Domae, and T. Ogata, ‘Time Pressure Based Human Workload and
Productivity Compatible System for Human-Robot Collaboration’, in 2022 IEEE 18th International Conference on
Automation Science and Engineering (CASE), 2022, pp. 659–666.
@inproceedings{Shirakura_2022,
abstract = {Diversity and inclusion in industries is a new and challenging problem. Owing to declining
birthrates and aging populations, it is becoming difficult to recruit workers at various industrial
sites. Automation is one solution to overcome this problem. However, technical limitations make it
difficult to automate all industries entirely. Therefore, it is expected that more diverse people, such
as the elderly and those with poor skills for necessary work, will participate in various industries.
Human-robot collaboration (HRC) is an approach that can balance the loads of humans and machines while
allowing for their limitations. If robots can understand human workloads and work capacity sufficiently,
it will be possible to collaborate while employing {"}diversified people{"} and maintaining
productivity. This paper presents a human-robot collaboration (HRC) system which balances human workload
and system productivity by using time pressure. To adjust time pressure using a real robot, we introduce
a task scheduler that determines the intervention timing considering the movement limitation of the
system and an interaction system using perception modality according to the work content. Our system was
evaluated through a subjective experiment. In the experiment, workload and productivity were estimated
using a physiological signal such as pupil diameter, subjective evaluation, and operation time. The
result shows that the proposed HRC system can control time pressure, which can affect human workload and
productivity.},
author = {Naoki Shirakura and Ryuichi Takase and Natsuki Yamanobe and Yukiyasu Domae and Tetsuya Ogata},
booktitle = {2022 {IEEE} 18th International Conference on Automation Science and Engineering ({CASE})},
date = {2022-8},
doi = {10.1109/CASE49997.2022.9926685},
issn = {2161-8089},
pages = {659--666},
publisher = {IEEE},
title = {Time Pressure Based Human Workload and Productivity Compatible System for Human-Robot
Collaboration},
year = {2022}
}
[19] H. Nagahama, I. G. Ramirez-Alpizar, and K. Harada, ‘Food Arrangement Framework for Cooking Robots’, in
2022 IEEE/ASME International Conference on Advanced Intelligent Mechatronics (AIM), 2022, pp. 1179–1184.
@inproceedings{Nagahama_2022,
abstract = {In this paper, we propose a food arrangement framework for a robot to carry out the final
arrangement (presentation) of food in plates. We trained a CNN using a dataset gathered through Amazon
Mechanical Turk, where people are asked to choose the best food arrangement between a pair of pictures.
Then, the CNN evaluates how good the arrangement is. If the evaluation is under a given threshold, then
the system will move some of the food according to our rearrangement algorithm.},
author = {Hoshito Nagahama and Ixchel G. Ramirez-Alpizar and Kensuke Harada},
booktitle = {2022 {IEEE}/{ASME} International Conference on Advanced Intelligent Mechatronics ({AIM})},
date = {2022-7},
doi = {10.1109/aim52237.2022.9863320},
issn = {2159-6255},
month = {jul},
pages = {1179--1184},
publisher = {{IEEE}},
title = {Food Arrangement Framework for Cooking Robots},
year = {2022}
}
[20] J. Qu, S. Miwa, and Y. Domae, ‘Interpretable Navigation Agents Using Attention-Augmented Memory’, in
2022 IEEE International Conference on Systems, Man, and Cybernetics (SMC), 2022, pp. 2575–2582.
@inproceedings{Qu_2022,
abstract = {Deep reinforcement learning (DRL) has achieved great success in a variety of domains from
games to complex tasks. As sequential decision-making problems that involve visual perceptions, DRL
approaches have been used in a wide range of robot navigation applications where agents need to perform
long-horizon tasks in partial observable environments. In such applications decision making depends on
observations in the past and to memorize and utilize the long-term history is crucial. Memory-based
models and the Transformer model have been proposed to solve these problems. However, in exchange for
the high performances of DRL, its decision-making process is unclear and difficult to interpret.
Existing approaches cannot uncover the long-term history information, which is important for
decision-making in long-horizon tasks. To address these concerns, we propose a novel deep RL model using
Attention-Augmented Memory (AAM) to interpret the long-horizon decision-making process. This model adds
representation states of past observations to a memory and uses a soft attention bottleneck to force the
agent to make decisions based on task-relevant past observations in the memory together with the current
observation. We apply the AAM model to the navigation problem of the Labyrinth [Beattie et al., 2014] to
interpret how the agent learns to make decisions. We generate attention maps and saliency maps to show (1)
what the agent attends to from the memory, and (2) what the agent highlights in the current observation. We
also compare the saliency change with the baseline model and find that our method is more resilient to
noise in visual observations. To summarize, our novel method demonstrates an interpretable and
noise-robust deep RL approach for long-horizon tasks.},
author = {Jia Qu and Shotaro Miwa and Yukiyasu Domae},
booktitle = {2022 {IEEE} International Conference on Systems, Man, and Cybernetics ({SMC})},
date = {2022-10},
doi = {10.1109/smc53654.2022.9945462},
pages = {2575--2582},
publisher = {IEEE},
title = {Interpretable Navigation Agents Using Attention-Augmented Memory},
year = {2022}
}
[21] S. Capy et al., ‘Yokobo: A Robot to Strengthen Links Amongst Users With Non-Verbal Behaviours’,
Machines, vol. 10, no. 8, p. 708, Aug. 2022.
@article{Capy_2022,
abstract = {Yokobo is a robject, designed using the principle of slow technology and aims at
strengthening the link between members of a couple, greeting people at the entrance, mirroring their
interaction and the environment around them. It was constructed by applying the notions of
human-robot-human interaction. Yokobo was tested in the GVlab to evaluate its technical robustness and
motion perception ahead of future long-term experiments with the target population. The results show
that Yokobo can sustain long-term interaction and serve as a welcoming partner.},
author = {Sim{\'{e}}on Capy and Pablo Osorio and Shohei Hagane and Corentin Aznar and Dora Garcin and
Enrique Coronado and Dominique Deuff and Ioana Ocnarescu and Isabelle Milleville and Gentiane Venture},
date = {2022-8},
doi = {10.3390/machines10080708},
issn = {2075-1702},
journal = {Machines},
language = {English},
month = {aug},
number = {8},
pages = {708},
publisher = {{MDPI} {AG}},
title = {Yokobo: A Robot to Strengthen Links Amongst Users With Non-Verbal Behaviours},
volume = {10},
year = {2022}
}
[22] J. Qu, S. Miwa, and Y. Domae, ‘Learning Landmark-Oriented Subgoals for Visual Navigation Using
Trajectory Memory’, in 2022 IEEE Symposium Series on Computational Intelligence (SSCI), 2022, pp. 708–714.
@inproceedings{Qu_2022,
abstract = {In many deep reinforcement learning (DRL) applications, the agents must perform complex and
long-horizon tasks which are still challenging in DRL due to temporally extended tasks with sparse
rewards. Goal-conditioned hierarchical reinforcement learning (HRL) is a promising approach to control
at multiple time scales via a hierarchical structure with subgoals. One of the key issues of goal-
conditioned HRL is the subgoals definition. In this work we propose a DRL model for learning
landmark-oriented subgoals using attention-augmented trajectory memory. In our approach the agent is
trained to make decisions based on both 1) current perception which is a short-term temporal history
derived from a vanilla Long Short-Term Memory (LSTM) and 2) trajectory memory which represents a
contextual summary of long-term historical LSTM states augmented by attention. The experiment on visual
navigation task shows that the short-term LSTM state of the current perception module extracts landmark
subgoals as clusters which correspond to lower-level policies and the long-term context states of
trajectory memory extracts subgoal-transitions which correspond to higher-level policies. Furthermore,
our method demonstrates superior adaptability to environmental changes.},
author = {Jia Qu and Shotaro Miwa and Yukiyasu Domae},
booktitle = {2022 {IEEE} Symposium Series on Computational Intelligence ({SSCI})},
date = {2022-12},
doi = {10.1109/ssci51031.2022.10022285},
pages = {708--714},
publisher = {IEEE},
title = {Learning Landmark-Oriented Subgoals for Visual Navigation Using Trajectory Memory},
year = {2022}
}
[23] 東 和樹, 小山 佳祐, 小澤 隆太, 永田 和之, 万 偉偉, 清川 拓哉, 原田 研介, ‘多様な物体操作を可能にする流体ネットワークを用いたシナジーハンド’, 日本ロボット学会誌, vol. 40,
no. 6, pp. 635–638, 2022.
@article{2022010074,
abstract = {多指ハンドに要求される物体形状への汎用性や操作能力を評価するため,流体ネットワークを搭載した 3
指ハンドを開発し,多様な物体の把持や精密な操作を要求されるタスクを実行する.さらに,提案する流体ネットワークを構成する蛇腹ソフトアクチュエータについて,その動作特性を検証する.},
author = {{東 和樹, 小山 佳祐, 小澤 隆太, 永田 和之, 万 偉偉, 清川 拓哉, 原田 研介}},
date = {2022-9},
journal = {日本ロボット学会誌},
language = {Japanese},
number = {6},
pages = {635--638},
publisher = {日本ロボット学会},
title = {多様な物体操作を可能にする流体ネットワークを用いたシナジーハンド},
volume = {40},
year = {2022}
}
[24] T. Motoda, D. Petit, T. Nishi, K. Nagata, W. Wan, and K. Harada, ‘Shelf Replenishment Based on Object
Arrangement Detection and Collapse Prediction for Bimanual Manipulation’, Robotics, vol. 11, no. 5, p. 104,
Sep. 2022.
@article{Motoda_2022,
abstract = {This paper presents an approach for generating a safe replenishment process from a single
depth image, which is provided as an input to two networks to identify arrangement patterns and predict
the occurrence of collapsing objects. The proposed inference-based strategy provides an appropriate
decision and course of action on whether to create an insertion space while considering the safety of
the shelf content.},
author = {Tomohiro Motoda and Damien Petit and Takao Nishi and Kazuyuki Nagata and Weiwei Wan and
Kensuke Harada},
date = {2022-9},
doi = {10.3390/robotics11050104},
issn = {2218-6581},
journal = {Robotics},
language = {English},
month = {sep},
number = {5},
pages = {104},
publisher = {{MDPI} {AG}},
title = {Shelf Replenishment Based on Object Arrangement Detection and Collapse Prediction for Bimanual
Manipulation},
volume = {11},
year = {2022}
}
[25] F. von Drigalski et al., ‘Team O2AC at the world robot summit 2020: towards jigless, high-precision
assembly’, Advanced Robotics, vol. 36, no. 22, pp. 1213–1227, Nov. 2022.
@article{von_Drigalski_2022,
abstract = {High-mix, low-volume assembly has been a long-standing challenge for robot systems. We
present a complete 2-armed robot system with general-purpose grippers and hand-held tools, which can
perform assembly for a wide variety of objects with tight tolerances. The complete source code and
3D-printed part designs are available for download and can be executed in simulation and with physical
robots using a regular desktop computer. Furthermore, we present the designs of multiple tools which can
be grasped and used by the robots in a human-like fashion. The system uses no parts-specific jigs or
grippers and can be applied to many different parts. It achieves high precision and reliability by using the
environment and gripper surfaces to position the grasped objects. The system obtained 3rd place and the
Japanese Society for Artificial Intelligence Award at the World Robot Summit 2020 Assembly Challenge.},
author = {Felix von Drigalski and Cristian C. Beltran-Hernandez and Chisato Nakashima and Zhengtao Hu
and Shuichi Akizuki and Toshio Ueshiba and Manabu Hashimoto and Kazumi Kasaura and Yukiyasu Domae and
Weiwei Wan and Kensuke Harada},
date = {2022-11},
doi = {10.1080/01691864.2022.2138541},
issn = {0169-1864},
journal = {Advanced Robotics},
language = {English},
month = {nov},
number = {22},
pages = {1213--1227},
publisher = {Informa {UK} Limited},
title = {Team O2AC at the world robot summit 2020: towards jigless, high-precision assembly},
volume = {36},
year = {2022}
}
[26] S. Capy et al., ‘Expanding the Frontiers of Industrial Robots beyond Factories: Design and in the Wild
Validation’, Machines, vol. 10, no. 12, pp. 1–20, Dec. 2022.
@article{Capy_2022,
abstract = {Robots able to coexist and interact with humans are key elements for Society 5.0. To produce
the right expectations towards robots, it will be necessary to expose the true current capabilities of
robots to the general public. In this context, Human-Robot Interaction (HRI) in the wild emerges as a
relevant paradigm. In this article, we affront the challenge of bringing an industrial robot (NEXTAGE
Open) outside factories and laboratories to be used in a public setting. We designed a multi-modal
interactive scenario that integrates state-of-the-art sensory devices, deep learning methods for
perception, and a human-machine graphical interface that monitors the system and provides useful
information to participants. The main objective of the presented work is to build a robust and fully
autonomous robotic system able to: 1) share the same space as humans, 2) work in a public and crowded
space, and 3) provide an intuitive and engaging experience for a robotic exposition. In addition, we
measured the attitudes, perceptions, expectations, and emotional reactions of volunteers. Results
suggest that participants considered our proposed scenario as enjoyable, safe, and interesting.},
author = {Sim{\'{e}}on Capy and Liz Rincon and Enrique Coronado and Shohei Hagane and Seiji Yamaguchi
and Victor Leve and Yuichiro Kawasumi and Yasutoshi Kudou and Gentiane Venture},
date = {2022-12},
doi = {10.3390/machines10121179},
issn = {2075-1702},
journal = {Machines},
language = {English},
month = {dec},
number = {12},
pages = {1--20},
publisher = {{MDPI} {AG}},
title = {Expanding the Frontiers of Industrial Robots beyond Factories: Design and in the Wild
Validation},
volume = {10},
year = {2022}
}
[27] K. Takata, T. Kiyokawa, N. Yamanobe, I. G. Ramirez-Alpizar, W. Wan, and K. Harada, ‘Graph-Based
Framework on Bimanual Manipulation Planning from Cooking Recipe’, Robotics, vol. 11, no. 6, p. 123, Nov.
2022.
@article{Takata_2022,
abstract = {This paper proposes a graph-based approach to bimanual cooking motion planning from a
cooking recipe. In our approach, we first decompose the cooking recipe into graph elements. Then, we
try to connect the graph elements taking into account the attributes of the input/output nodes. If two
graph elements cannot be connected to each other, we search for a graph element that can be inserted
between them from the database of graph elements. Since the constructed graph includes the whole
sequence of robot’s motion performing the cooking task, we can generate a task sequence of a dual-arm
manipulator simultaneously performing two different tasks by using two arms. Through experimental study,
we show that it is possible to generate robot motion from a cooking recipe and perform the cooking
motions while simultaneously moving the left and right arms.},
author = {Kota Takata and Takuya Kiyokawa and Natsuki Yamanobe and Ixchel G. Ramirez-Alpizar and Weiwei
Wan and Kensuke Harada},
date = {2022-11},
doi = {10.3390/robotics11060123},
issn = {2218-6581},
journal = {Robotics},
language = {English},
month = {nov},
number = {6},
pages = {123},
publisher = {{MDPI} {AG}},
title = {Graph-Based Framework on Bimanual Manipulation Planning from Cooking Recipe},
volume = {11},
year = {2022}
}
[28] E. Coronado, S. Itadera, and I. G. Ramirez-Alpizar, ‘Integrating Virtual, Mixed, and Augmented Reality
to Human-Robot Interaction Applications Using Game Engines: A Brief Review of Accessible Software Tools and
Frameworks’, Applied Sciences, vol. 13, no. 3, p. 1292, Jan. 2023.
@article{Coronado_2023,
abstract = {This article identifies and summarizes software tools and frameworks proposed in the
Human-Robot Interaction (HRI) literature for developing extended reality (XR) experiences using game
engines. This review includes primary studies proposing Virtual Reality (VR), Augmented Reality (AR),
and Mixed Reality (MR) solutions where humans can control or interact with real robotic platforms using
devices that extend the user’s reality. The objective of this article is not to present an extensive
list of applications and tools. Instead, we present recent, relevant, common, and accessible frameworks
and software tools implemented in research articles published in high-impact robotics conferences and
journals. For this, we searched papers published during a seven-year period between 2015 and 2022 in
relevant databases for robotics (Science Direct, IEEE Xplore, ACM digital library, Springer Link, and
Web of Science). Additionally, we present and classify the application context of the reviewed articles
in four groups: social robotics, programming of industrial robots, teleoperation of industrial robots,
and Human-Robot collaboration (HRC).},
author = {Enrique Coronado and Shunki Itadera and Ixchel G. Ramirez-Alpizar},
date = {2023-1},
doi = {10.3390/app13031292},
issn = {2076-3417},
journal = {Applied Sciences},
language = {English},
month = {jan},
number = {3},
pages = {1292},
publisher = {{MDPI} {AG}},
title = {Integrating Virtual, Mixed, and Augmented Reality to Human-Robot Interaction Applications Using
Game Engines: A Brief Review of Accessible Software Tools and Frameworks},
volume = {13},
year = {2023}
}
[29] K. Kase, A. Tateishi, and T. Ogata, ‘Robot Task Learning With Motor Babbling Using Pseudo Rehearsal’,
IEEE Robotics and Automation Letters, vol. 7, no. 3, pp. 8377–8382, Jul. 2022.
@article{Kase_2022,
abstract = {The paradigm of deep robot learning from demonstrations allows robots to solve complex
manipulation tasks by capturing motor skills from given demonstrations; however, collecting
demonstrations can be costly. As an alternative, robots can acquire embodiment and motor skills by
randomly moving their bodies, which is referred to as motor babbling. Motor babbling data provide
relatively inexpensive demonstrations and can be used to enhance the generalizability of robot motions,
but they are often used for pre-training or joint training with target task demonstrations. This study
focused on the concept of continual learning via pseudo-rehearsal and retaining the embodiment
information acquired from motor babbling data for effective task learning. Pseudo-rehearsal has
beneficial features that allow robot models to be retrained and distributed without access to the motor
babbling dataset. In this paper, we propose a pseudo-rehearsal framework that can be jointly trained
with task trajectories and rehearsed motor babbling trajectories. Using our proposed method, robots can
retain motor skills from motor babbling and exhibit improved performance in task execution.},
author = {Kei Kase and Ai Tateishi and Tetsuya Ogata},
date = {2022-7},
doi = {10.1109/lra.2022.3187517},
issn = {2377-3766},
journal = {IEEE Robotics and Automation Letters},
month = {jul},
number = {3},
pages = {8377--8382},
publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
title = {Robot Task Learning With Motor Babbling Using Pseudo Rehearsal},
volume = {7},
year = {2022}
}
[30] X. Zhang, Y. Domae, W. Wan, and K. Harada, ‘Learning Efficient Policies for Picking Entangled Wire
Harnesses: An Approach to Industrial Bin Picking’, IEEE Robotics and Automation Letters, vol. 8, no. 1, pp.
73–80, Jan. 2023.
@article{Zhang_2023,
abstract = {Wire harnesses are essential connecting components in the manufacturing industry but are
challenging to automate in industrial tasks such as bin picking. They are long, flexible and tend to
get entangled when randomly placed in a bin. This makes the robot struggle to pick a single one from the
clutter. Besides, modeling wire harnesses is difficult due to the complex structures of combining
deformable cables with rigid components, making it unsuitable for training or collecting data in
simulation. In this work, instead of directly lifting wire harnesses, we propose to grasp and extract
the target following circle-like trajectories until it is separated from the clutter. We learn a policy
from real-world data to infer the optimal action and grasp from visual observation. Our policy enables
the robot to perform non-tangle pickings efficiently by maximizing success rates and reducing the
execution time. To evaluate our policy, we present a set of real-world experiments on picking wire
harnesses. Results show a significant improvement in success rates from 49.2% to 84.6% over the
tangle-agnostic bin picking method. We also evaluate the effectiveness of our policy under different
clutter scenarios using unseen types of wire harnesses. The proposed method is expected to provide a
practical solution for automating manufacturing processes with wire harnesses.},
author = {Xinyi Zhang and Yukiyasu Domae and Weiwei Wan and Kensuke Harada},
date = {2023-1},
doi = {10.1109/lra.2022.3222995},
issn = {2377-3766},
journal = {{IEEE} Robotics and Automation Letters},
month = {jan},
number = {1},
pages = {73--80},
publisher = {Institute of Electrical and Electronics Engineers ({IEEE})},
title = {Learning Efficient Policies for Picking Entangled Wire Harnesses: An Approach to Industrial Bin
Picking},
volume = {8},
year = {2023}
}
[31] F. Erich, B. Bourreau, C. K. Tan, G. Caron, Y. Yoshiyasu, and N. Ando, ‘Neural Scanning: Rendering and
determining geometry of household objects using Neural Radiance Fields’, in 2023 IEEE/SICE International
Symposium on System Integration (SII), 2023.
@inproceedings{Erich_2023,
abstract = {In this paper we present a hardware and software framework for Neural Scanning of household
objects using Neural Radiance Fields (NeRF). The NeRF technique tries to learn a probabilistic
representation of radiance and density that can be used to render objects and to export objects’
geometry. Our framework allows for easy scanning of the objects by rotating the object while using
cameras in a static position. The objects we scan are mostly taken from the Yale-CMU-Berkeley (YCB)
object set, and we release our scans as part of a public dataset.},
author = {Floris Erich and Baptiste Bourreau and Chun Kwang Tan and Guillaume Caron and Yusuke Yoshiyasu
and Noriaki Ando},
booktitle = {2023 {IEEE}/{SICE} International Symposium on System Integration ({SII})},
date = {2023-1},
doi = {10.1109/sii55687.2023.10039147},
publisher = {IEEE},
title = {Neural Scanning: Rendering and determining geometry of household objects using Neural Radiance
Fields},
year = {2023}
}
2021年度
[1]廣瀬 颯太, 堂前 幸康, RamirezAlpizar Georgina Ixchel, 植芝 俊夫. DQNを用いた視覚からの動作生成に基づくシーンに隠された目標物体の探索.
画像センシングシンポジウム,
2021.
@misc{2021003125,
author = {{廣瀬 颯太, 堂前 幸康, RamirezAlpizar Georgina Ixchel, 植芝 俊夫}},
date = {2021-06-09},
title = {{DQN}を用いた視覚からの動作生成に基づくシーンに隠された目標物体の探索},
howpublished = {画像センシングシンポジウム},
language = {Japanese},
abstract = {対象物のピッキングから複数の対象物のクラスタを払い,対象物を発見する動作の切り替え戦略を提案する.提案手法はDQNをベースにしており,1
回の払い動作で複数のクラスタから対象物を見つけることが可能.シミュレーション環境にて検証を行い,提案手法の有効性を確認した.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[2]Enrique Coronado, Kosuke Fukuda, Ixchel G. Ramirez-Alpizar, Natsuki Yamanobe, Gentiane Venture, and
Kensuke Harada. Assembly action understanding from fine-grained hand motions, a multi-camera and deep
learning approach. International Conference on Intelligent Robots and Systems, 2021.
@misc{2021003384,
author = {Enrique Coronado and Kosuke Fukuda and Ixchel G. Ramirez-Alpizar and Natsuki Yamanobe and
Gentiane Venture and Kensuke Harada},
date = {2021-09-27},
title = {Assembly Action Understanding from Fine-Grained Hand Motions, a Multi-camera and Deep
Learning approach},
doi = {10.1109/IROS51168.2021.9636715},
howpublished = {International Conference on Intelligent Robots and Systems},
language = {English},
url = {https://www.semanticscholar.org/paper/bb8dba5ffdd4093c03fa6878246013f15b7da372},
abstract = {This article presents a novel software architecture enabling the analysis of assembly
actions from fine-grained hand motions. Unlike previous works that compel humans to wear ad-hoc
devices or visual markers in the human body, our approach enables users to move without additional
burdens. Modules developed are able to: (i) reconstruct the 3D motions of body and hands keypoints
using multi-camera systems; (ii) recognize objects manipulated by humans, and (iii) analyze the
relationship between the human motions and the manipulated objects. We implement different solutions
based on OpenPose and Mediapipe for body and hand keypoint detection. Additionally, we discuss the
suitability of these solutions for enabling real-time data processing. We also propose a novel
method
using Long Short-Term Memory (LSTM) deep neural networks to analyze the relationship between the
detected human motions and manipulated objects. Experimental validations show the superiority of the
proposed approach against previous works based on Hidden Markov Models (HMMs).},
booktitle = {2021 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems
({IROS})},
creationdate = {2022-08-24T17:17:56},
publisher = {{IEEE}},
venue = {2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
year = {2021}
}
[3]牧原 昂志, 堂前 幸康, RamirezAlpizar Georgina Ixchel, 植芝 俊夫. pix2stiffnessによる柔軟物体の把持位置検出. 第27回 画像センシングシンポジウム,
2021.
@misc{2021003935,
author = {{牧原 昂志, 堂前 幸康, RamirezAlpizar Georgina Ixchel, 植芝 俊夫}},
date = {2021-06-09},
title = {pix2stiffnessによる柔軟物体の把持位置検出},
howpublished = {第27回 画像センシングシンポジウム},
language = {Japanese},
abstract = {本研究では、変形によって損傷が起きるような柔軟物体の把持を行う。RGB画像を入力として、物体の柔らかさをスコア化して画像に示したStiffness
mapを、GANによるImage
translation手法を用いて推定し、このmapを用いて画像中からの把持位置検出を行う。シミュレーションと実画像に対して、変形を防ぐことのできる適切な把持位置を検出した。},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[4]牧原 昂志, 堂前 幸康, RamirezAlpizar Georgina Ixchel, 植芝 俊夫. pix2stiffnessによる柔軟物体の把持位置検出. 第27回画像センシングシンポジウム,
2021.
@misc{2021004187,
author = {{牧原 昂志, 堂前 幸康, RamirezAlpizar Georgina Ixchel, 植芝 俊夫}},
date = {2021-06-09},
title = {pix2stiffnessによる柔軟物体の把持位置検出},
howpublished = {第27回画像センシングシンポジウム},
language = {Japanese},
abstract = {本研究では,柔軟物体のなかでも,把持力による変形で損傷してしまうものを対象とした把持位置を検出する.物体の柔らかさをスコアとして画像で示したStiffness
mapを,RGB画像の入力からGANのアーキテクチャを用いて推定する.Stiffness
mapを用いて,できるだけが硬い部分かつ,把持に成功しやすい適切な把持位置を検出する.シミュレーションと実画像に対して把持位置検出と把持実験を行った.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[5]Issei Sera, Natsuki Yamanobe, Ixchel G. Ramirez-Alpizar, Zhenting Wang, Weiwei Wan, and Kensuke Harada.
Assembly planning by recognizing a graphical instruction manual. International Conference on Intelligent
Robots and Systems (IROS), 2021.
@misc{2021004189,
author = {Issei Sera and Natsuki Yamanobe and Ixchel G. Ramirez-Alpizar and Zhenting Wang and Weiwei
Wan and Kensuke Harada},
date = {2021-09-27},
title = {Assembly Planning by Recognizing a Graphical Instruction Manual},
doi = {10.1109/IROS51168.2021.9636041},
eprint = {2106.00424},
eprintclass = {cs.RO},
eprinttype = {arXiv},
howpublished = {International Conference on Intelligent Robots and Systems (IROS)},
language = {English},
url = {https://ui.adsabs.harvard.edu/abs/2021arXiv210600424S},
abstract = {This paper proposes a robot assembly planning method by automatically reading the
graphical instruction manuals designed for humans. Essentially, the method generates an Assembly Task
Sequence Graph (ATSG) by recognizing a graphical instruction manual. An ATSG is a graph describing
the
assembly task procedure by detecting types of parts included in the instruction images, completing
the
missing information automatically, and correcting the detection errors automatically. To build an
ATSG, the proposed method first extracts the information of the parts contained in each image of the
graphical instruction manual. Then, by using the extracted part information, it estimates the proper
work motions and tools for the assembly task. After that, the method builds an ATSG by considering
the
relationship between the previous and following images, which makes it possible to estimate the
undetected parts caused by occlusion using the information of the entire image series. Finally, by
collating the total number of each part with the generated ATSG, the excess or deficiency of parts
are
investigated, and task procedures are removed or added according to those parts. In the experiment
section, we apply the proposed method to a graphical instruction manual for a chair to build an ATSG and
demonstrate that the action sequences found in the ATSG can be executed by a dual-arm robot. The
results show that the proposed method is effective and simplifies robot teaching in automatic
assembly.},
booktitle = {2021 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems
({IROS})},
creationdate = {2022-08-24T17:17:56},
eid = {arXiv:2106.00424},
journaltitle = {arXiv e-prints},
keywords = {Computer Science - Robotics},
pages = {arXiv:2106.00424},
publisher = {{IEEE}},
venue = {2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
year = {2021}
}
[6]Xinyi Zhang, Keisuke Koyama, Yukiyasu Domae, Weiwei Wan, and Kensuke Harada. A topological solution of
entanglement for complex-shaped parts in robotic bin-picking. International Conference on Automation
Science
and Engineering (CASE), 2021.
@misc{2021004557,
author = {Xinyi Zhang and Keisuke Koyama and Yukiyasu Domae and Weiwei Wan and Kensuke
Harada},
date = {2021-08-24},
title = {A Topological Solution of Entanglement for Complex-shaped Parts in Robotic
Bin-picking},
doi = {10.1109/case49439.2021.9551426},
eprint = {2106.00943},
eprintclass = {cs.RO},
eprinttype = {arXiv},
howpublished = {International Conference on Automation Science and Engineering (CASE)},
language = {English},
url = {https://ui.adsabs.harvard.edu/abs/2021arXiv210600943Z},
abstract = {This paper addresses the problem of picking up only one object at a time avoiding any
entanglement in bin-picking. To cope with a difficult case where the complex-shaped objects are
heavily entangled together, we propose a topology-based method that can generate non-tangle grasp
positions on a single depth image. The core technique is entanglement map, which is a feature map to
measure the entanglement possibilities obtained from the input image. We use the entanglement map to
select probable regions containing graspable objects. The optimum grasping pose is detected from the
selected regions considering the collision between robot hand and objects. Experimental results show
that our analytic method provides a more comprehensive and intuitive observation of entanglement and
exceeds previous learning-based work in success rates. Especially, our topology-based method does
not
rely on any object models or time-consuming training process, so that it can be easily adapted to
more
complex bin-picking scenes.},
booktitle = {2021 {IEEE} 17th International Conference on Automation Science and Engineering
({CASE})},
creationdate = {2022-08-24T17:17:56},
eid = {arXiv:2106.00943},
journaltitle = {arXiv e-prints},
keywords = {Computer Science - Robotics},
pages = {arXiv:2106.00943},
publisher = {{IEEE}},
year = {2021}
}
[7]堂前 幸康. ニューノーマルとロボットのこれからの関係. 日本ロボット学会ロボット工学セミナー, 2021.
@misc{2021004560,
author = {堂前 幸康},
date = {2021-04-27},
title = {ニューノーマルとロボットのこれからの関係},
howpublished = {日本ロボット学会ロボット工学セミナー},
language = {Japanese},
abstract = {COVID-19がロボット研究にもたらした変化を分析するとともに,変化に対応する萌芽的研究成果を紹介する.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[8]堂前 幸康. スマートロボットの実証実験のための評価基準の策定. 情報処理学会連続セミナー, 2021.
@misc{2021004561,
author = {堂前 幸康},
date = {2021-10-14},
title = {スマートロボットの実証実験のための評価基準の策定},
howpublished = {情報処理学会連続セミナー},
language = {Japanese},
abstract = {連続セミナー2021「ニューノーマル時代に向けた情報技術の潮流」,「AI×ロボティクス -AI
とロボットの共進化-」内にてMOONSHOTで実施する研究テーマを紹介する.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[9]堂前 幸康. デジタルツインによる人・機械協調のためのロボットビジョン. 画像符号化シンポジウムPCSJ/映像メディア処理シンポジウムIMPS, 2021.
@misc{2021006437,
author = {堂前 幸康},
date = {2021-11-15},
title = {デジタルツインによる人・機械協調のためのロボットビジョン},
howpublished = {画像符号化シンポジウムPCSJ/映像メディア処理シンポジウムIMPS},
language = {Japanese},
abstract =
{産総研インダストリアルCPS研究センターでは、デジタルツインによる人・機械協調の実現を目指し、研究活動を進めている。デジタルツインやAIの連携において、ロボットビジョンの果たす役割は大きい。本講演では、デジタルツインと人・機械協調に関する将来像と、サイバーフィジカルシステム研究棟での研究活動を紹介する。特にその中でロボットビジョンが果たす役割、それに関する技術動向を紹介する。},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[10]堂前 幸康. ニューノーマルにおけるロボットビジョン研究. 画像センシング展2021, 2021.
@misc{2021006442,
author = {堂前 幸康},
date = {2021-06-11},
title = {ニューノーマルにおけるロボットビジョン研究},
howpublished = {画像センシング展2021},
language = {Japanese},
abstract =
{コロナの影響が継続する中,ロボットビジョンの応用産業にどのような変化が生じているかを整理するとともに,世の中でどのような研究が進みはじめているかを紹介する.またそれに関する研究活動として,産総研のサイバーフィジカルシステム研究棟で取り組む,人・機械協調研究の内容を紹介する.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[11]Vitor H. Isume, Kensuke Harada, Weiwei Wan, and Yukiyasu Domae. Using affordances for assembly:
Towards
a complete craft assembly system. The International Conference on Control, Automation, and Systems
(ICCAS),
2021.
@misc{2021007898,
author = {Vitor H. Isume and Kensuke Harada and Weiwei Wan and Yukiyasu Domae},
date = {2021-10-15},
title = {Using affordances for assembly: Towards a complete Craft Assembly System},
doi = {10.23919/ICCAS52745.2021.9649936},
howpublished = {The International Conference on Control, Automation, and Systems (ICCAS)},
language = {English},
url = {https://www.semanticscholar.org/paper/4a465bfbc499edf22636bd774f69b20230de45e3},
abstract = {When crafting a homemade object, such as in DIY (do-it-yourself) projects, a human is
able
to, from a goal object in mind, assemble a craft with the available objects in the scene without
having a set of instructions. Taking inspiration from this, we propose a robotic system capable of
performing such a task, which we define as a Craft Assembly Task. In this paper, we show the
preliminary
version of our proposed system, focusing on the first step, where it needs to choose, from the
available objects, which ones should be used as the components of a given assembly. The possible
candidates are evaluated based on the visual likeness, using shape matching and dimensions
comparison
as the main criteria, and on functionality, using affordance matching. The desired final assembly is
given as an input to the system in a 3D CAD model, from which the system extracts the shape,
dimension
and affordance labels from each component, then using a framework of neural networks, it detects the
available objects in the scene and evaluates their affordances. After finding candidates with the
corresponding affordances, their point clouds are used to evaluate their shapes and dimensions by
using a RANSAC algorithm.},
booktitle = {2021 21st International Conference on Control, Automation and Systems ({ICCAS})},
creationdate = {2022-08-24T17:17:56},
publisher = {{IEEE}},
venue = {2021 21st International Conference on Control, Automation and Systems (ICCAS)},
year = {2021}
}
[12]Vitor Isume, Kensuke Harada, Wei Wei Wan, Yukiyasu Domae. Towards an affordance-based craft assembly.
日本ロボット学会学術講演会, 2021.
@misc{2021008245,
author = {{Vitor Isume, Kensuke Harada, Wei Wei Wan, Yukiyasu Domae}},
date = {2021-09-09},
title = {Towards an affordance-based craft assembly},
howpublished = {日本ロボット学会学術講演会},
language = {English},
abstract = {In Do it Yourself (DIY) tasks, a human has an object it wants to build, then by analyzing
the available materials, it defines a possible craft which is similar enough to the object it wants to
build, according to his own criteria. Taking inspiration from this type of task, we define a specific
assembly task: a Craft Assembly Task. In this research, we propose a robotic system to solve this
task.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[13]堂前 幸康. 人・ロボット協調における産業用ロボットのAI化. レーザ協会第191回研究会, 2021.
@misc{2021009675,
author = {堂前 幸康},
date = {2021-09-15},
title = {人・ロボット協調における産業用ロボットの{AI}化},
howpublished = {レーザ協会第191回研究会},
language = {Japanese},
abstract = {産総研サイバーフィジカルシステム研究棟における人・ロボット協調に関する産業用ロボットのAI高度化技術について紹介する.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[14]Xinyi Zhang, Y. Domae, W. Wan, and K. Harada. Efficiently picking tangled-prone parts by learning a
sequential bin picking policy. 計測自動制御学会 システムインテグレーション部門講演会, 2021.
@misc{2021010144,
author = {Xinyi Zhang and Y. Domae and W. Wan and K. Harada},
date = {2021-12-15},
title = {Efficiently Picking Tangled-Prone Parts by Learning a Sequential Bin Picking Policy},
doi = {10.1109/case49439.2021.9551426},
howpublished = {計測自動制御学会 システムインテグレーション部門講演会},
language = {English},
url = {https://www.semanticscholar.org/paper/ffaa36a872485c76f3757e0f2418613e1bfdf944},
abstract = {This paper introduces an autonomous bin picking system for cable harnesses, an extremely
challenging object in the bin picking task. Currently, cable harnesses are unsuitable for
automated production due to their length and elusive structures. Considering the task of robotic bin
picking where the harnesses are heavily entangled, it is challenging for a robot to pick harnesses
up
one by one using conventional bin picking methods. In this paper, we present an efficient approach
to
overcoming the difficulties when dealing with entangled-prone parts. We develop several motion
schemes
for the robot to pick up a single harness avoiding any entanglement. Moreover, we propose a
learning-based bin picking policy to select both grasps and designed motion schemes in a reasonable
sequence. Our method is unique in sufficiently solving the entanglement problem
in
picking cluttered cable harnesses. We demonstrate our approach on a set of real-world experiments,
during which the proposed method is capable of performing the sequential bin picking task with both
effectiveness and accuracy under a variety of cluttered scenarios.},
booktitle = {2021 {IEEE} 17th International Conference on Automation Science and Engineering
({CASE})},
creationdate = {2022-08-24T17:17:56},
publisher = {{IEEE}},
venue = {ArXiv},
year = {2021}
}
[15]白倉 尚貴, 高瀬 竜一, 山野辺 夏樹, 堂前 幸康. 人・ロボット協調作業におけるタイムプレッシャー管理と作業負荷および作業効率の関係の検証. 第22回 計測自動制御学会
システムインテグレーション部門講演会, 2021.
@misc{2021011376,
author = {{白倉 尚貴, 高瀬 竜一, 山野辺 夏樹, 堂前 幸康}},
date = {2021-12-17},
title = {人・ロボット協調作業におけるタイムプレッシャー管理と作業負荷および作業効率の関係の検証},
howpublished = {第22回 計測自動制御学会 システムインテグレーション部門講演会},
language = {Japanese},
abstract =
{人・ロボット協調作業の設計には,従来の自動システムで重視された効率のみでなく人の作業負荷の考慮も必要である.本論文では,人・ロボット協調作業における人に作業負荷を与える要因としてタイムプレッシャーに着目し,ロボットによってタイムプレッシャーを調整することで作業負荷をコントロール可能なシステムを構築した.被験者実験を行い,生理信号及びアンケートを用いて人の作業負荷と効率の関係を調査した.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[16]牧原 昂志, 堂前 幸康, 片岡 裕雄, RamirezAlpizar Georgina Ixchel, 原田 研介. アピアランスからの物体柔軟性推定に基づく把持位置検出. 第22回計測自動制御学会
システムインテグレーション部門講演会, 2021.
@misc{2021012513,
author = {{牧原 昂志, 堂前 幸康, 片岡 裕雄, RamirezAlpizar Georgina Ixchel, 原田 研介}},
date = {2021-12-17},
title = {アピアランスからの物体柔軟性推定に基づく把持位置検出},
howpublished = {第22回計測自動制御学会 システムインテグレーション部門講演会},
language = {Japanese},
abstract =
{本研究では,物体のアピアランスから柔軟性を推定し,単一の深度画像から把持位置を検出する手法を提案する.Encorder-Decoder構造の深層学習モデルによって,個々の物体領域ごとに柔軟性を推定する.推定結果とハンドモデルの画像を利用し把持位置検出を行うことで,様々な対象物やシーンに適用する.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[17]Jingren Xu, Yukiyasu Domae, Toshio Ueshiba, Weiwei Wan, and Kensuke Harada. Planning an efficient and
robust base sequence for a mobile manipulator performing multiple pick-and-place tasks. IEEE Access,
9(9):165526–165541, 2021.
@article{2020009946,
author = {Jingren Xu and Yukiyasu Domae and Toshio Ueshiba and Weiwei Wan and Kensuke Harada},
date = {2021-2},
journaltitle = {arXiv e-prints},
title = {Planning an Efficient and Robust Base Sequence for a Mobile Manipulator Performing Multiple
Pick-and-place Tasks},
doi = {10.1109/access.2021.3135374},
eid = {arXiv:2001.08042},
eprint = {2001.08042},
eprintclass = {cs.RO},
eprinttype = {arXiv},
issn = {2169-3536},
language = {English},
number = {9},
pages = {165526--165541},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200108042X},
volume = {9},
abstract = {In this paper, we address efficiently and robustly collecting objects stored in
different
trays using a mobile manipulator. A resolution complete method, based on precomputed reachability
database, is proposed to explore collision-free inverse kinematics (IK) solutions and then a
resolution complete set of feasible base positions can be determined. This method approximates a set
of representative IK solutions that are especially helpful when solving IK and checking collision
are
treated separately. For real world applications, we take into account the base positioning
uncertainty
and plan a sequence of base positions that reduces the number of necessary base movements for
collecting the target objects; the base sequence is robust in that the mobile manipulator is able to
complete the part-supply task even if there is a certain deviation from the planned base positions. Our
experiments demonstrate both the efficiency compared to regular base sequence and the feasibility in
real world applications.},
creationdate = {2022-08-24T17:17:56},
journal = {IEEE Access},
keywords = {Computer Science - Robotics},
publisher = {IEEE-INST ELECTRICAL ELECTRONICS ENGINEERS INC},
venue = {2020 IEEE International Conference on Robotics and Automation (ICRA)},
year = {2021}
}
[18]Kazuyuki Nagata and Takao Nishi. Modeling object arrangement patterns and picking arranged
objects. ADVANCED ROBOTICS, 35(16):981–994, 2021.
@article{2020012498,
author = {Kazuyuki Nagata and Takao Nishi},
date = {2021-7},
journaltitle = {Advanced Robotics},
title = {Modeling object arrangement patterns and picking arranged objects},
doi = {10.1080/01691864.2021.1948446},
issn = {0169-1864},
language = {English},
number = {16},
pages = {981--994},
url = {https://www.semanticscholar.org/paper/d4260c4dcc52ca9da42c160ad25f59b640d452fd},
volume = {35},
abstract = {This study investigates object picking focusing on object arrangement patterns.
Objects stored in distribution warehouses or stores are arranged in regular patterns, and the
grasping
strategy for object picking is selected according to the object arrangement pattern. However, object
arrangement patterns have not been modeled for object picking. In this study, we represent objects
as
polyhedral primitives, such as cuboids or hexagonal cylinders, and model object arrangements by
considering occlusion patterns for object model surfaces and considering whether the adjacent object
occluding the surface is moveable. We define grasp patterns based on combinations of the grasp
surfaces and discuss the grasping strategy when the grasp surfaces are occluded by adjacent objects.
We then introduce a newly developed gripper for picking arranged objects. The gripper comprises a
suction gripper and a two-fingered gripper. The suction gripper has a telescopic arm and a swing
suction cup. The two-fingered gripper mechanism combines a Scott Russell linkage and a parallel
link.
This mechanism is advantageous for the gripper in reaching narrow spaces and inserting fingers
between
objects. We demonstrate the picking up of arranged objects using the grippers.},
creationdate = {2022-08-24T17:17:56},
journal = {ADVANCED ROBOTICS},
publisher = {TAYLOR & FRANCIS LTD},
venue = {Adv. Robotics},
year = {2021}
}
[19]Enrique Coronado, Kosuke Fukuda, Ixchel G. Ramirez-Alpizar, Natsuki Yamanobe, Gentiane Venture, and
Kensuke Harada. Assembly action understanding from fine-grained hand motions, a multi-camera and deep
learning approach. In 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS),
pages
2605–2611. IEEE Xplore, 2021.
@inproceedings{2021003385,
author = {Enrique Coronado and Kosuke Fukuda and Ixchel G. Ramirez-Alpizar and Natsuki Yamanobe and
Gentiane Venture and Kensuke Harada},
booktitle = {2021 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems
({IROS})},
date = {2021-9},
title = {Assembly Action Understanding from Fine-Grained Hand Motions, a Multi-camera and Deep
Learning approach},
doi = {10.1109/IROS51168.2021.9636715},
language = {English},
pages = {2605--2611},
publisher = {IEEE Xplore},
url = {https://www.semanticscholar.org/paper/bb8dba5ffdd4093c03fa6878246013f15b7da372},
venue = {2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
abstract = {This article presents a novel software architecture enabling the analysis of assembly
actions from fine-grained hand motions. Unlike previous works that compel humans to wear ad-hoc
devices or visual markers in the human body, our approach enables users to move without additional
burdens. Modules developed are able to: (i) reconstruct the 3D motions of body and hands keypoints
using multi-camera systems; (ii) recognize objects manipulated by humans, and (iii) analyze the
relationship between the human motions and the manipulated objects. We implement different solutions
based on OpenPose and Mediapipe for body and hand keypoint detection. Additionally, we discuss the
suitability of these solutions for enabling real-time data processing. We also propose a novel
method
using Long Short-Term Memory (LSTM) deep neural networks to analyze the relationship between the
detected human motions and manipulated objects. Experimental validations show the superiority of the
proposed approach against previous works based on Hidden Markov Models (HMMs).},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[20]Issei Sera, Natsuki Yamanobe, Ixchel G. Ramirez-Alpizar, Zhenting Wang, Weiwei Wan, and Kensuke
Harada.
Assembly planning by recognizing a graphical instruction manual. In 2021 IEEE/RSJ International Conference
on Intelligent Robots and Systems (IROS), page arXiv:2106.00424. IEEE Xplore, 2021.
@inproceedings{2021004190,
author = {Issei Sera and Natsuki Yamanobe and Ixchel G. Ramirez-Alpizar and Zhenting Wang and Weiwei
Wan and Kensuke Harada},
booktitle = {2021 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems
({IROS})},
date = {2021-9},
title = {Assembly Planning by Recognizing a Graphical Instruction Manual},
doi = {10.1109/IROS51168.2021.9636041},
eprint = {2106.00424},
eprintclass = {cs.RO},
eprinttype = {arXiv},
language = {English},
pages = {arXiv:2106.00424},
publisher = {IEEE Xplore},
url = {https://ui.adsabs.harvard.edu/abs/2021arXiv210600424S},
venue = {2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
abstract = {This paper proposes a robot assembly planning method by automatically reading the
graphical instruction manuals designed for humans. Essentially, the method generates an Assembly Task
Sequence Graph (ATSG) by recognizing a graphical instruction manual. An ATSG is a graph describing
the
assembly task procedure by detecting types of parts included in the instruction images, completing
the
missing information automatically, and correcting the detection errors automatically. To build an
ATSG, the proposed method first extracts the information of the parts contained in each image of the
graphical instruction manual. Then, by using the extracted part information, it estimates the proper
work motions and tools for the assembly task. After that, the method builds an ATSG by considering
the
relationship between the previous and following images, which makes it possible to estimate the
undetected parts caused by occlusion using the information of the entire image series. Finally, by
collating the total number of each part with the generated ATSG, the excess or deficiency of parts
are
investigated, and task procedures are removed or added according to those parts. In the experiment
section, we build an ATSG using the proposed method to a graphical instruction manual for a chair
and
demonstrate the action sequences found in the ATSG can be performed by a dual-arm robot execution.
The
results show the proposed method is effective and simplifies robot teaching in automatic
assembly.},
creationdate = {2022-08-24T17:17:56},
eid = {arXiv:2106.00424},
journaltitle = {arXiv e-prints},
keywords = {Computer Science - Robotics},
year = {2021}
}
[21]Xinyi Zhang, Keisuke Koyama, Yukiyasu Domae, Weiwei Wan, and Kensuke Harada. A topological solution of
entanglement for complex-shaped parts in robotic bin-picking. In International Conference on Automation
Science and Engineering (CASE), pages 461–467. IEEE, 2021.
▶︎[Bib TeX]
@inproceedings{2021004558,
author = {Xinyi Zhang and Keisuke Koyama and Yukiyasu Domae and Weiwei Wan and Kensuke
Harada},
booktitle = {International Conference on Automation Science and Engineering (CASE)},
date = {2021-8},
title = {A Topological Solution of Entanglement for Complex-shaped Parts in Robotic
Bin-picking},
doi = {10.1109/case49439.2021.9551426},
eprint = {2106.00943},
eprintclass = {cs.RO},
eprinttype = {arXiv},
language = {English},
pages = {461--467},
publisher = {IEEE},
url = {https://ui.adsabs.harvard.edu/abs/2021arXiv210600943Z},
abstract = {This paper addresses the problem of picking up only one object at a time avoiding any
entanglement in bin-picking. To cope with a difficult case where the complex-shaped objects are
heavily entangled together, we propose a topology-based method that can generate non-tangle grasp
positions on a single depth image. The core technique is entanglement map, which is a feature map to
measure the entanglement possibilities obtained from the input image. We use the entanglement map to
select probable regions containing graspable objects. The optimum grasping pose is detected from the
selected regions considering the collision between robot hand and objects. Experimental results show
that our analytic method provides a more comprehensive and intuitive observation of entanglement and
exceeds previous learning-based work in success rates. Especially, our topology-based method does
not
rely on any object models or time-consuming training process, so that it can be easily adapted to
more
complex bin-picking scenes.},
creationdate = {2022-08-24T17:17:56},
eid = {arXiv:2106.00943},
issn = {2161-8070},
journaltitle = {arXiv e-prints},
keywords = {Computer Science - Robotics},
year = {2021}
}
[22]Peihao Shi, Zhengtao Hu, Kazuyuki Nagata, Weiwei Wan, Yukiyasu Domae, and Kensuke Harada. Development
of
a shape-memorable adaptive pin array fixture. ADVANCED ROBOTICS, 35(10):591–602, 2021.
▶︎[Bib TeX]
@article{2021004586,
author = {Peihao Shi and Zhengtao Hu and Kazuyuki Nagata and Weiwei Wan and Yukiyasu Domae and
Kensuke
Harada},
date = {2021-4},
journaltitle = {arXiv e-prints},
title = {Development of a Shape-memorable Adaptive Pin Array Fixture},
doi = {10.1080/01691864.2021.1911845},
eid = {arXiv:2005.09968},
eprint = {2005.09968},
eprintclass = {cs.RO},
eprinttype = {arXiv},
issn = {0169-1864},
language = {English},
number = {10},
pages = {591--602},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200509968S},
volume = {35},
abstract = {This paper proposes an adaptive pin-array fixture. The key idea of this research is to
use
the shape-memorable mechanism of pin array to fix multiple different shaped parts with common pin
configuration. The clamping area consists of a matrix of passively slid-able pins that conform
themselves to the contour of the target object. Vertical motion of the pins enables the fixture to
encase the profile of the object. The shape memorable mechanism is realized by the combination of
the
rubber bush and fixing mechanism of a pin. Several physical peg-in-hole tasks is conducted to verify
the feasibility of the fixture.},
creationdate = {2022-08-24T17:17:56},
journal = {ADVANCED ROBOTICS},
keywords = {Computer Science - Robotics},
publisher = {TAYLOR & FRANCIS LTD},
year = {2021}
}
[23]Vitor H. Isume, Kensuke Harada, Weiwei Wan, and Yukiyasu Domae. Using affordances for assembly:
Towards
a complete craft assembly system. In The Proceeding of The International Conference on Control,
Automation,
and Systems (ICCAS). Institute of Control, Robotics and Systems, 2021.
▶︎[Bib TeX]
@inproceedings{2021007899,
author = {Vitor H. Isume and Kensuke Harada and Weiwei Wan and Yukiyasu Domae},
booktitle = {The Proceeding of The International Conference on Control, Automation, and Systems
(ICCAS)},
date = {2021-8},
title = {Using affordances for assembly: Towards a complete Craft Assembly System},
doi = {10.23919/ICCAS52745.2021.9649936},
language = {English},
publisher = {Institute of Control, Robotics and Systems},
url = {https://www.semanticscholar.org/paper/4a465bfbc499edf22636bd774f69b20230de45e3},
venue = {2021 21st International Conference on Control, Automation and Systems (ICCAS)},
abstract = {When crafting a homemade object, such as in DIY (do-it-yourself) projects, a human is
able
to, from a goal object in mind, assemble a craft with the available objects in the scene without
having a set of instructions. Taking inspiration from this, we propose a robotic system capable of
performing such task, that we define as a Craft Assembly Task. In this paper, we show the
preliminary
version of our proposed system, focusing on the first step, where it needs to choose, from the
available objects, which ones should be used as the components of a given assembly. The possible
candidates are evaluated based on the visual likeness, using shape matching and dimensions
comparison
as the main criteria, and on functionality, using affordance matching. The desired final assembly is
given as an input to the system in a 3D CAD model, from which the system extracts the shape,
dimension
and affordance labels from each component, then using a framework of neural networks, it detects the
available objects in the scene and evaluate their affordances. After finding candidates with the
corresponding affordances, their point clouds are used to evaluate their shapes and dimensions by
using a RANSAC algorithm.},
creationdate = {2022-08-24T17:17:56},
issn = {2093-7121},
year = {2021}
}
[24]原田 研介, 堂前 幸康. 学習に基づくバラ積みからのピッキング. In 画像ラボ, volume 32, pages 60–66. 日本工業出版, 2021.
▶︎[Bib TeX]
@incollection{2021010005,
author = {{原田 研介, 堂前 幸康}},
booktitle = {画像ラボ},
date = {2021-2},
title = {学習に基づくバラ積みからのピッキング},
language = {Japanese},
number = {12},
pages = {60--66},
publisher = {日本工業出版},
volume = {32},
abstract =
{近年,製品のライフサイクルが短くなることで,製品の製造形態も大量生産から多品種少量生産や変種変量生産へと移行している.また,それに伴い,製品を組み立てる生産形態もライン生産からセル生産へと移行している.セル生産における製品の組立において,作業者は部品箱から必要な部品をとり,作業台の上で組み付ける.従来より産業用ロボットの動作を生成するために用いられてきたティーチング・プレイバック方式は,セル生産の自動化に対して直接的に適用するのは困難な場合が多い.セル生産を自動化するために,ロボットは多種多様な部品が箱に乱雑に格納された状態から,部品を一個ずつ取り出さなくてはならない.このような作業のことを,本稿ではバラ積みピッキングと呼ぶ.このとき,個々の部品に対して,いちいちバラ積みピッキングのための視覚認識や把持に関するパラメータを設定していたのでは,ロボットは多種多様な対象物をピッキングすることは困難である.このような問題に対処するために,学習に基づいたバラ積みピッキングの手法が注目されている.深層学習は産業のあらゆる場面に浸透し,ロボット化を大幅に推進する可能性を秘めているが,ここではバラ積みピッキングに対する深層学習の適用方法について説明する.},
creationdate = {2022-08-24T17:17:56},
year = {2021}
}
[25]Akira Nakamura, Natsuki Yamanobe, Ixchel G. Ramirez-Alpizar, Kensuke Harada, and Yukiyasu Domae.
Selection of optimal error recovery process using evaluation standards in automated plants. Journal of
Robotics, Networking and Artificial Life, 8(3):211–217, 2021.
▶︎[Bib TeX]
@article{2022003163,
author = {Akira Nakamura and Natsuki Yamanobe and Ixchel G. Ramirez-Alpizar and Kensuke Harada and
Yukiyasu Domae},
title = {Selection of Optimal Error Recovery Process using Evaluation Standards in Automated
Plants},
doi = {10.2991/jrnal.k.210922.012},
issn = {2352-6386},
language = {English},
number = {3},
pages = {211--217},
url = {https://www.semanticscholar.org/paper/d432157fa127f2d03ee2b24e293114bb6302ad8a},
volume = {8},
creationdate = {2022-08-24T17:17:56},
journal = {Journal of Robotics, Networking and Artificial Life},
publisher = {Atlantis Press International B.V.},
venue = {J. Robotics Netw. Artif. Life},
year = {2021}
}
[26]Yan Wang, Cristian C. Beltran-Hernandez, Weiwei Wan, and Kensuke Harada. Robotic imitation of human
assembly skills using hybrid trajectory and force learning. In 2021 IEEE International Conference on
Robotics and Automation (ICRA), page arXiv:2103.05912. IEEE, 2021.
▶︎[Bib TeX]
@inproceedings{2022004498,
author = {Yan Wang and Cristian C. Beltran-Hernandez and Weiwei Wan and Kensuke Harada},
booktitle = {2021 {IEEE} International Conference on Robotics and Automation ({ICRA})},
date = {2021-5},
title = {Robotic Imitation of Human Assembly Skills Using Hybrid Trajectory and Force
Learning},
doi = {10.1109/icra48506.2021.9561619},
eprint = {2103.05912},
eprintclass = {cs.RO},
eprinttype = {arXiv},
language = {English},
pages = {arXiv:2103.05912},
publisher = {IEEE},
url = {https://ui.adsabs.harvard.edu/abs/2021arXiv210305912W},
abstract = {Robotic assembly tasks involve complex and low-clearance insertion trajectories with
varying contact forces at different stages. While the nominal motion trajectory can be easily
obtained
from human demonstrations through kinesthetic teaching, teleoperation, simulation, among other
methods, the force profile is harder to obtain especially when a real robot is unavailable. It is
difficult to obtain a realistic force profile in simulation even with physics engines. Such
simulated
force profiles tend to be unsuitable for the actual robotic assembly due to the reality gap and
uncertainty in the assembly process. To address this problem, we present a combined learning-based
framework to imitate human assembly skills through hybrid trajectory learning and force learning.
The
main contribution of this work is the development of a framework that combines hierarchical
imitation
learning, to learn the nominal motion trajectory, with a reinforcement learning-based force control
scheme to learn an optimal force control policy. To further improve the imitation learning part, we
develop a hierarchical architecture, following the idea of goal-conditioned imitation learning, to
generate the trajectory learning policy on the skill level offline. Through experimental
validations,
we corroborate that the proposed learning-based framework is robust to uncertainty in the assembly
task, can generate high-quality trajectories, and can find suitable force control policies, which
adapt to the task’s force requirements more efficiently.},
creationdate = {2022-08-24T17:17:56},
eid = {arXiv:2103.05912},
issn = {1049-3492},
journaltitle = {arXiv e-prints},
keywords = {Computer Science - Robotics},
year = {2021}
}
[27]Tomohiro Motoda, Damien Petit, Weiwei Wan, and Kensuke Harada. Bimanual shelf picking planner based on
collapse prediction. In 2021 IEEE 17th International Conference on Automation Science and Engineering
(CASE), page arXiv:2105.14764. IEEE, 2021.
▶︎[Bib TeX]
@inproceedings{2022004507,
author = {Tomohiro Motoda and Damien Petit and Weiwei Wan and Kensuke Harada},
booktitle = {2021 {IEEE} 17th International Conference on Automation Science and Engineering
({CASE})},
date = {2021-8},
title = {Bimanual Shelf Picking Planner Based on Collapse Prediction},
doi = {10.1109/CASE49439.2021.9551507},
eprint = {2105.14764},
eprintclass = {cs.RO},
eprinttype = {arXiv},
language = {English},
pages = {arXiv:2105.14764},
publisher = {IEEE},
url = {https://ui.adsabs.harvard.edu/abs/2021arXiv210514764M},
venue = {2021 IEEE 17th International Conference on Automation Science and Engineering
(CASE)},
abstract = {In logistics warehouse, since many objects are randomly stacked on shelves, it becomes
difficult for a robot to safely extract one of the objects without other objects falling from the
shelf. In previous works, a robot needed to extract the target object after rearranging the
neighboring objects. In contrast, humans extract an object from a shelf while supporting other
neighboring objects. In this paper, we propose a bimanual manipulation planner based on collapse
prediction trained with data generated from a physics simulator, which can safely extract a single
object while supporting the other objects. We confirmed that the proposed method achieves more than
80% success rate for safe extraction by real-world experiments using a dual-arm manipulator.},
creationdate = {2022-08-24T17:17:56},
eid = {arXiv:2105.14764},
issn = {2161-8089},
journaltitle = {arXiv e-prints},
keywords = {Computer Science - Robotics},
year = {2021}
}
[28]Takumi Sakamoto, Weiwei Wan, Takao Nishi, and Kensuke Harada. Efficient picking by considering
simultaneous two-object grasping. In Proceedings of 2021 IEEE/RSJ International Conference on Intelligent
Robots and Systems (IROS). IEEE, 2021.
▶︎[Bib TeX]
@inproceedings{2022004510,
author = {Takumi Sakamoto and Weiwei Wan and Takao Nishi and Kensuke Harada},
booktitle = {Proceedings of 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems
(IROS)},
date = {2021-9},
title = {Efficient Picking by Considering Simultaneous Two-Object Grasping},
doi = {10.1109/IROS51168.2021.9636727},
language = {English},
publisher = {IEEE},
url = {https://www.semanticscholar.org/paper/9f01cbb56cbcf08ceb7c6f40a80ad621d6930f2a},
venue = {2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
abstract = {This paper presents a motion planning algorithm that enables robots to efficiently pick
up
objects by considering simultaneous multi-object grasping. At the center of the algorithm is a cost
function that helps to determine one of the following three grasping policies considering distance
and
friction constraints: Grasping a single object; Grasping two objects simultaneously; Grasping two
objects simultaneously after pushing one of the objects close to the other. After recognizing the
object distributions on a table by using a depth camera and Mask R-CNN, our algorithm will select
grasp policies from the three candidates considering the cost function, and plan a policy sequence
that can most quickly finish picking all the objects using dynamic programming. Both simulation and
real-world experiments are carried out to examine the performance of the proposed planner. Results
show that the proposed method significantly improves the efficiency of robotic picking compared to
conventional single-object-based methods.},
creationdate = {2022-08-24T17:17:56},
issn = {2153-0866},
year = {2021}
}
[29]Jingren Xu, Yukiyasu Domae, Weiwei Wan, and Kensuke Harada. An optimization-based motion planner for a
mobile manipulator to perform tasks during the motion. IEEE/SICE International Symposium on System
Integration (SII), 2022.
▶︎[Bib TeX]
@misc{2021009673,
author = {Jingren Xu and Yukiyasu Domae and Weiwei Wan and Kensuke Harada},
date = {2022-01-08},
title = {An Optimization-based Motion Planner for a Mobile Manipulator to Perform Tasks During the
Motion},
doi = {10.1109/sii52469.2022.9708859},
howpublished = {IEEE/SICE International Symposium on System Integration (SII)},
language = {English},
abstract = {In this paper, we present an optimization-based motion planner to plan the time-optimal
whole-body motion of a nonholonomic mobile manipulator, to pick up objects while simultaneously
moving
the manipulator and the base. The simultaneous motion further reduces the operation time of the
picking
tasks. What distinguishes our planner from the common motion planners, which plan the motion between
two configurations, is that our planner considers performing tasks, such as grasping an object,
during
the motion. We formulate the time-optimal motion planning as an optimization problem. One of the
major
difficulties is to find an appropriate representation of the tasks during the motion, since the time
and configuration of the robot at the moment of performing the task are unknown. To address this
issue, we propose a novel formulation of the optimization variables such that constraints arising
from
the task are smooth and differentiable, which is essential for obtaining the feasible solution using
an NLP solver. We present preliminary numerical result of the proposed planner, it shows that our
planner can obtain feasible trajectory that satisfy all the constraints.},
booktitle = {2022 {IEEE}/{SICE} International Symposium on System Integration ({SII})},
creationdate = {2022-08-24T17:17:56},
publisher = {{IEEE}},
year = {2022}
}
[30]堂前 幸康. シミュレーションとAI・ロボティクスを活用した人・機械協調の取り組み. AI時代のモノづくりセミナー, 2022.
▶︎[Bib TeX]
@misc{2022001928,
author = {堂前 幸康},
date = {2022-02-21},
title = {シミュレーションと{AI}・ロボティクスを活用した人・機械協調の取り組み},
howpublished = {AI時代のモノづくりセミナー},
language = {Japanese},
abstract =
{少子高齢化により労働生産人口の減少トレンドが持続する中、AIやロボット技術によりどのような対応をとっていくべきか。本講演では、人と機械が協調するためのサイバーフィジカルシステムの萌芽的取り組みと、基盤となるAI・ロボティクスのシミュレーション技術を紹介するとともに、社会問題へのアプローチ、実証事例を示す。},
creationdate = {2022-08-24T17:17:56},
year = {2022}
}
[31]Jingren Xu, Yukiyasu Domae, Weiwei Wan, and Kensuke Harada. An optimization-based motion planner for a
mobile manipulator to perform tasks during the motion. In Proceeding of The 2022 IEEE/SICE International
Symposium on System Integration, pages 519–524. IEEE/SICE, 2022.
▶︎[Bib TeX]
@inproceedings{2021009674,
author = {Jingren Xu and Yukiyasu Domae and Weiwei Wan and Kensuke Harada},
booktitle = {Proceeding of The 2022 IEEE/SICE International Symposium on System Integration},
date = {2022-2},
title = {An Optimization-based Motion Planner for a Mobile Manipulator to Perform Tasks During the
Motion},
doi = {10.1109/SII52469.2022.9708859},
language = {English},
pages = {519--524},
publisher = {IEEE/SICE},
url = {https://www.semanticscholar.org/paper/b9a3e5bb0fca30a725ab298af2b1ceed73918fcd},
venue = {2022 IEEE/SICE International Symposium on System Integration (SII)},
abstract = {In this paper, we present an optimization-based motion planner to plan the time-optimal
whole-body motion of a nonholonomic mobile manipulator, to pick up objects while simultaneously
moving
the manipulator and the base. The simultaneous motion further reduces the operation time of the
picking
tasks. What distinguishes our planner from the common motion planners, which plan the motion between
two configurations, is that our planner considers performing tasks, such as grasping an object,
during
the motion. We formulate the time-optimal motion planning as an optimization problem. One of the
major
difficulties is to find an appropriate representation of the tasks during the motion, since the time
and configuration of the robot at the moment of performing the task are unknown. To address this
issue, we propose a novel formulation of the optimization variables such that constraints arising
from
the task are smooth and differentiable, which is essential for obtaining the feasible solution using
an NLP solver. We present preliminary numerical result of the proposed planner, it shows that our
planner can obtain feasible trajectory that satisfy all the constraints.},
creationdate = {2022-08-24T17:17:56},
year = {2022}
}
[32]Yan Wang, Cristian Camilo Beltran-Hernandez, Wei Wei Wan, Kensuke Harada. Hybrid trajectory and force
learning of complex assembly tasks: A combined learning framework. IEEE Access, 2022.
▶︎[Bib TeX]
@article{2022004364,
author = {{Yan Wang, Cristian Camilo Beltran-Hernandez, Wei Wei Wan, Kensuke Harada}},
date = {2022-1},
title = {Hybrid Trajectory and Force Learning of Complex Assembly Tasks: {A} Combined Learning
Framework},
issn = {2169-3536},
language = {English},
creationdate = {2022-08-24T17:17:56},
journal = {IEEE Access},
publisher = {IEEE-INST ELECTRICAL ELECTRONICS ENGINEERS INC},
year = {2022}
}
[33]Yan Wang, Cristian C. Beltran-Hernandez, Weiwei Wan, and Kensuke Harada. An adaptive imitation
learning
framework for robotic complex contact-rich insertion tasks. FRONTIERS IN ROBOTICS AND AI, 8, 2022.
▶︎[Bib TeX]
@article{2022004494,
author = {Yan Wang and Cristian C. Beltran-Hernandez and Weiwei Wan and Kensuke Harada},
date = {2022-1},
title = {An Adaptive Imitation Learning Framework for Robotic Complex Contact-Rich Insertion
Tasks},
doi = {10.3389/frobt.2021.777363},
issn = {2296-9144},
language = {English},
url = {https://www.semanticscholar.org/paper/b5e124ace60793321b82ff4243f505e5cc2ca1b1},
volume = {8},
abstract = {Complex contact-rich insertion is a ubiquitous robotic manipulation skill and usually
involves nonlinear and low-clearance insertion trajectories as well as varying force requirements. A
hybrid trajectory and force learning framework can be utilized to generate high-quality trajectories
by imitation learning and find suitable force control policies efficiently by reinforcement
learning.
However, with the mentioned approach, many human demonstrations are necessary to learn several tasks
even when those tasks require topologically similar trajectories. Therefore, to reduce human
repetitive teaching efforts for new tasks, we present an adaptive imitation framework for robot
manipulation. The main contribution of this work is the development of a framework that introduces
dynamic movement primitives into a hybrid trajectory and force learning framework to learn a
specific
class of complex contact-rich insertion tasks based on the trajectory profile of a single task
instance belonging to the task class. Through experimental evaluations, we validate that the
proposed
framework is sample efficient, safer, and generalizes better at learning complex contact-rich
insertion tasks on both simulation environments and on real hardware.},
creationdate = {2022-08-24T17:17:56},
journal = {FRONTIERS IN ROBOTICS AND AI},
pmid = {35087872},
publisher = {FRONTIERS MEDIA SA},
venue = {Frontiers in Robotics and AI},
year = {2022}
}
2020年度
[1]勝田 顕光. 視触覚センサ応用のための透過・不透過素材の特性を利用した物体姿勢推定. MIRU2020, 2020.
▶︎[Bib TeX]
@misc{2020005443,
author = {勝田 顕光},
date = {2020-08-04},
title = {視触覚センサ応用のための透過・不透過素材の特性を利用した物体姿勢推定},
howpublished = {MIRU2020},
language = {Japanese},
abstract =
{ロボットが物体を操作する際、把持中の物体の姿勢推定が必要となる。近年、ゲル等の透明柔軟素材に物体を押し付け、その素材越しにカメラで物体を観測することで姿勢推定する視触覚センサが応用され始めている。このセンサは透明素材をそのまま用いるものと、表面を不透明に加工したものに大別される。本発表では、透明素材部分と不透明素材も併用することで、テクスチャとシルエットの情報をロバストに得ることができるセンサを試作した。両素材の特性を生かすことで、姿勢の推定精度が向上することを示す。},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[2]廣瀬 颯太, 堂前 幸康, 植芝 俊夫. 目標物体が隠された環境におけるRGBD画像を用いたpick-and-place. 第23回 画像の認識・理解シンポジウム, 2020.
▶︎[Bib TeX]
@misc{2020005463,
author = {{廣瀬 颯太, 堂前 幸康, 植芝 俊夫}},
date = {2020-08-03},
title = {目標物体が隠された環境における{RGBD}画像を用いたPick-and-Place},
howpublished = {第23回 画像の認識・理解シンポジウム},
language = {Japanese},
abstract =
{ロボットが物体を掴み移動させるPick-and-Placeと呼ばれる作業を実現するためには、物体の位置や姿勢の認識が不可欠であるが、物体同士が重なり合って、目標とする対象物が見えない状態が発生する。本研究では、作業空間内において目標物体が隠されている環境おいて目標物体を探索し、Pick-and-Placeを行う。提案手法では入力をRGBD画像としQ学習にニューラルネットワークを用いるDeep
Q-Network(DQN)を用いる強化学習手法により物体の探索及び把持動作自立的な切り替えを実現する。従来手法では探索時に手先にグリッパーが搭載されたマニピュレータを用いた押し動作と把持動作を使用していたが、提案手法ではそれらに加えて払い動作を追加し従来手法と比較して高速な探索及び目標物体把持を実現する。本稿では、提案システム構成及びシミュレーション環境、実機環境について述べる},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[3]牧原 昂志. Stiffness mapを適用した柔軟物体のための把持位置検出. 第23回 画像の認識・理解シンポジウム, 2020.
▶︎[Bib TeX]
@misc{2020005529,
author = {牧原 昂志},
date = {2020-08-02},
title = {Stiffness mapを適用した柔軟物体のための把持位置検出},
howpublished = {第23回 画像の認識・理解シンポジウム},
language = {Japanese},
abstract =
{本論文では,柔軟物体の変形しやすさを考慮したロボットによる把持位置検出手法を提案する.柔軟物体の把持について,例えばコンビニエンスストアに並ぶ食料品などは中身が壊れず,外部が損傷を起こさないような把持位置が必要である.今回は変形しやすさを表したStiffnessmapを手動で作成し,画像ベースの把持位置検出手法に適用した.数種類の柔軟物体に対し,提案手法の有用性を実機実験により検証し評価を行なった.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[4]Luis Enrique Coronado Zuniga, Venture Gentiane, Natsuki Yamanobe. Applying kansei/affective engineering
methodologies in the design of social and service robots: A systematic review. International Journal of
Social Robotics, 13:1161–1171, 2020.
▶︎[Bib TeX]
@article{2020000398,
author = {{Luis Enrique Coronado Zuniga, Venture Gentiane, Natsuki Yamanobe}},
title = {Applying Kansei/Affective Engineering Methodologies in the Design of Social and Service
Robots: {A} Systematic Review},
doi = {10.1007/s12369-020-00709-x},
issn = {1875-4791},
language = {English},
pages = {1161--1171},
volume = {13},
creationdate = {2022-08-24T15:57:50},
journal = {International Journal of Social Robotics},
publisher = {SPRINGER},
year = {2020}
}
[5]Yukiyasu Domae, Akio Noda, Tatsuya Nagatani, and Weiwei Wan. Robotic general parts feeder: Bin-picking,
regrasping, and kitting. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages
5004–5010. IEEE, 2020.
▶︎[Bib TeX]
@inproceedings{2020000956,
author = {Yukiyasu Domae and Akio Noda and Tatsuya Nagatani and Weiwei Wan},
booktitle = {2020 {IEEE} International Conference on Robotics and Automation ({ICRA})},
date = {2020-05},
title = {Robotic General Parts Feeder: Bin-picking, Regrasping, and Kitting},
doi = {10.1109/ICRA40945.2020.9197056},
language = {English},
pages = {5004--5010},
publisher = {IEEE},
url = {https://www.semanticscholar.org/paper/696020b8be6ad57ff8451b97c9fa3d8a680f36b3},
venue = {2020 IEEE International Conference on Robotics and Automation (ICRA)},
abstract = {The automatic parts feeding of multiple objects is an unsolved problem in manufacturing
industry. In this paper, we tackle the problem by proposing a multi-robot system. The system
comprises
three subcomponents which perform bin-picking, regrasping, and kitting, respectively. The three
subcomponents divide and conquer the automatic multiple parts feeding problem by considering a
coarse-to-fine manipulation process. Multiple robot arms are connected in series as a pipeline. The
robots are separated into three groups to perform the roles of each subcomponent. The accuracy of
the
state and manipulation are getting higher along with the changes of the subcomponents in the
pipeline.
In the experimental section, the performance of the system is evaluated by using the Mean Picks Per
Hour (MPPH) and successful rate, and is compared to traditional parts feeder and manual labor. The
results show that the Mean Picks Per Hour (MPPH) of the proposed is 351 with eleven various-shaped
industrial parts, which is faster than the state-of-the-art robotic bin-picking system. The lead
time
of the proposed system for new parts is less than by a combination of traditional parts feeders or
manual labor.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[6]Jingren Xu, Kensuke Harada, Weiwei Wan, Toshio Ueshiba, and Yukiyasu Domae. Planning an efficient and
robust base sequence for a mobile manipulator performing multiple pick-and-place tasks. In Proceedings of
International Conference on Robotics and Automation, pages 11018–11024. IEEE Robotics and Automation
Society, January 2020.
▶︎[Bib TeX]
@inproceedings{2020001722,
author = {Jingren Xu and Kensuke Harada and Weiwei Wan and Toshio Ueshiba and Yukiyasu Domae},
booktitle = {Proceedings of International Conference on Robotics and Automation},
title = {Planning an Efficient and Robust Base Sequence for a Mobile Manipulator Performing Multiple
Pick-and-place Tasks},
year = {2020},
month = jan,
pages = {11018--11024},
publisher = {IEEE Robotics and Automation Society},
abstract = {In this paper, we address efficiently and robustly collecting objects stored in
different
trays using a mobile manipulator. A resolution complete method, based on precomputed reachability
database, is proposed to explore collision-free inverse kinematics (IK) solutions and then a
resolution complete set of feasible base positions can be determined. This method approximates a set
of representative IK solutions that are especially helpful when solving IK and checking collision
are
treated separately. For real world applications, we take into account the base positioning
uncertainty
and plan a sequence of base positions that reduce the number of necessary base movements for
collecting the target objects, the base sequence is robust in that the mobile manipulator is able to
complete the part-supply task even there is certain deviation from the planned base positions. Our
experiments demonstrate both the efficiency compared to regular base sequence and the feasibility in
real world applications.},
archiveprefix = {arXiv},
creationdate = {2022-08-24T15:57:50},
date = {2020-05},
doi = {10.1109/ICRA40945.2020.9196999},
eid = {arXiv:2001.08042},
eprint = {2001.08042},
eprinttype = {arXiv},
journal = {arXiv e-prints},
keywords = {Computer Science - Robotics},
primaryclass = {cs.RO},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200108042X},
venue = {2020 IEEE International Conference on Robotics and Automation (ICRA)}
}
[7]福本 靖彦, 山野辺 夏樹, 万 偉偉, 原田 研介. ヒトの作業特性に基づく複数ステップ力制御の最適化. 日本ロボット学会誌, 38(4):89–98, 2020.
▶︎[Bib TeX]
@article{2020004157,
author = {{福本 靖彦, 山野辺 夏樹, 万 偉偉, 原田 研介}},
date = {2020-5},
title = {ヒトの作業特性に基づく複数ステップ力制御の最適化},
issn = {0289-1824},
language = {Japanese},
number = {4},
pages = {89--98},
volume = {38},
creationdate = {2022-08-24T15:57:50},
journal = {日本ロボット学会誌},
publisher = {日本ロボット学会},
year = {2020}
}
[8]高瀬 竜一, 堂前 幸康, 植芝 俊夫. 生産現場ロボットで用いられる距離・画像センサ. In 距離・画像センサの基礎と最先端, pages 20–27. S&T出版株式会社, 2020.
▶︎[Bib TeX]
@incollection{2020009631,
author = {{高瀬 竜一, 堂前 幸康, 植芝 俊夫}},
booktitle = {距離・画像センサの基礎と最先端},
date = {-},
title = {生産現場ロボットで用いられる距離・画像センサ},
language = {Japanese},
pages = {20--27},
publisher = {S&T出版株式会社},
abstract = {生産現場に用いられている距離・画像センサの要求特性、課題、現在の課題を解決する技術例を解説する。},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[9]RamirezAlpizar Ixchel. 「デザイン思考を取り入れたロボットの社会実装」特集について. In 日本ロボット学会誌, volume 38, pages 2–2. 一般社団法人
日本ロボット学会, 2020.
▶︎[Bib TeX]
@incollection{2021001915,
author = {{RamirezAlpizar Ixchel}},
booktitle = {日本ロボット学会誌},
date = {-},
title = {「デザイン思考を取り入れたロボットの社会実装」特集について},
language = {Japanese},
number = {8},
pages = {2--2},
publisher = {一般社団法人 日本ロボット学会},
volume = {38},
abstract =
{人とロボットが共存せざるを得ない場面が増えてきた中、ロボットの設計にあたっては人間を中心とした考え方である「デザイン思考」が欠かせないポイントになってきている。これに加えて誰も経験したことがないパンデミックが発生し、あらゆる場面でロボットの活躍にさらなる期待感が高まっている。},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[10]Yukiyasu Domae, Akio Noda, Tatsuya Nagatani, and Weiwei Wan. Robotic general parts feeder:
Bin-picking,
regrasping, and kitting. IEEE International Conference on Robotics and Automation (ICRA), 2020.
▶︎[Bib TeX]
@misc{2020000955,
author = {Yukiyasu Domae and Akio Noda and Tatsuya Nagatani and Weiwei Wan},
date = {2020-06-02},
title = {Robotic General Parts Feeder: Bin-picking, Regrasping, and Kitting},
doi = {10.1109/ICRA40945.2020.9197056},
howpublished = {IEEE International Conference on Robotics and Automation (ICRA)},
language = {English},
url = {https://www.semanticscholar.org/paper/696020b8be6ad57ff8451b97c9fa3d8a680f36b3},
abstract = {The automatic parts feeding of multiple objects is an unsolved problem in manufacturing
industry. In this paper, we tackle the problem by proposing a multi-robot system. The system
comprises
three subcomponents which perform bin-picking, regrasping, and kitting, respectively. The three
subcomponents divide and conquer the automatic multiple parts feeding problem by considering a
coarse-to-fine manipulation process. Multiple robot arms are connected in series as a pipeline. The
robots are separated into three groups to perform the roles of each subcomponent. The accuracy of
the
state and manipulation are getting higher along with the changes of the subcomponents in the
pipeline.
In the experimental section, the performance of the system is evaluated by using the Mean Picks Per
Hour (MPPH) and successful rate, and is compared to traditional parts feeder and manual labor. The
results show that the Mean Picks Per Hour (MPPH) of the proposed is 351 with eleven various-shaped
industrial parts, which is faster than the state-of-the-art robotic bin-picking system. The lead
time
of the proposed system for new parts is less than by a combination of traditional parts feeders or
manual labor.},
booktitle = {2020 {IEEE} International Conference on Robotics and Automation ({ICRA})},
creationdate = {2022-08-24T15:57:50},
publisher = {{IEEE}},
venue = {2020 IEEE International Conference on Robotics and Automation (ICRA)},
year = {2020}
}
[11]長濱星斗, RamirezAlpizar Georgina Ixchel, 小山佳祐, 万偉偉, 原田 研介. ロボットによる盛り付けのための食品配置生成.
日本機械学会ロボティクス・メカトロニクス講演会2020, 2020.
▶︎[Bib TeX]
@misc{2020005527,
author = {{長濱星斗, RamirezAlpizar Georgina Ixchel, 小山佳祐, 万偉偉, 原田 研介}},
date = {2020-05-29},
title = {ロボットによる盛り付けのための食品配置生成},
howpublished = {日本機械学会ロボティクス・メカトロニクス講演会2020},
language = {Japanese},
abstract =
{本研究では,機械学習を用いて食品配置に関する知識をロボットに与え,ロボットによる食品配置を行う.配置生成においてはシミュレーション内にて,知識を活用しアルゴリズムにより与えられた食品の配置を生成させる.そして.その配置をロボットが再現する.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[12]高田康太, RamirezAlpizar Georgina Ixchel, 小山佳祐, 万偉偉, 原田 研介. レシピを基にしたロボットによる料理の作業計画. ロボティクス・メカトロニクス 講演会
2020,
2020.
▶︎[Bib TeX]
@misc{2020005528,
author = {{高田康太, RamirezAlpizar Georgina Ixchel, 小山佳祐, 万偉偉, 原田 研介}},
date = {2020-05-29},
title = {レシピを基にしたロボットによる料理の作業計画},
howpublished = {ロボティクス・メカトロニクス 講演会 2020},
language = {Japanese},
abstract = {In this paper, we propose a method to plan cooking tasks autonomously based on cooking
recipes. In this research, the actions and objects needed which are not described in the recipe but
need to consider are clarified by expressing cooking tasks using a network structure.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[13]Jingren Xu, Kensuke Harada, Wei Wei Wan, Toshio Ueshiba, Yukiyasu Domae. Planning an efficient and
robust base sequence for a mobile manipulator performing multiple pick-and-place tasks. IEEE International
Conference on Robotics and Automation (ICRA), January 2020.
▶︎[Bib TeX]
@misc{2020008134,
author = {{Jingren Xu, Kensuke Harada, Wei Wei Wan, Toshio Ueshiba, Yukiyasu Domae}},
howpublished = {IEEE International Conference on Robotics and Automation (ICRA)},
month = jan,
title = {Planning an Efficient and Robust Base Sequence for a Mobile Manipulator Performing Multiple
Pick-and-place Tasks},
year = {2020},
abstract = {In this paper, we address efficiently and robustly collecting objects stored in
different
trays using a mobile manipulator. A resolution complete method, based on precomputed reachability
database, is proposed to explore collision-free inverse kinematics (IK) solutions and then a
resolution complete set of feasible base positions can be determined. This method approximates a set
of representative IK solutions that are especially helpful when solving IK and checking collision
are
treated separately. For real world applications, we take into account the base positioning
uncertainty
and plan a sequence of base positions that reduce the number of necessary base movements for
collecting the target objects, the base sequence is robust in that the mobile manipulator is able to
complete the part-supply task even there is certain deviation from the planned base positions. Our
experiments demonstrate both the efficiency compared to regular base sequence and the feasibility in
real world applications.},
archiveprefix = {arXiv},
booktitle = {2020 {IEEE} International Conference on Robotics and Automation ({ICRA})},
creationdate = {2022-08-24T15:57:50},
date = {2020-06-03},
doi = {10.1109/ICRA40945.2020.9196999},
eid = {arXiv:2001.08042},
eprint = {2001.08042},
eprinttype = {arXiv},
journal = {arXiv e-prints},
keywords = {Computer Science - Robotics},
language = {English},
pages = {arXiv:2001.08042},
primaryclass = {cs.RO},
publisher = {{IEEE}},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200108042X},
venue = {2020 IEEE International Conference on Robotics and Automation (ICRA)}
}
[14]Peihao Shi, Zhengtao Hu, Kazuyuki Nagata, Weiwei Wan, Yukiyasu Domae, and Kensuke Harada. An adaptive
pin array fixture to fix multiple parts. ROBOMECH2020, 2020.
▶︎[Bib TeX]
@misc{2020008136,
author = {Peihao Shi and Zhengtao Hu and Kazuyuki Nagata and Weiwei Wan and Yukiyasu Domae and
Kensuke
Harada},
date = {2020-05-28},
title = {An Adaptive Pin Array Fixture to Fix Multiple Parts},
doi = {10.1299/jsmermd.2020.2a2-k10},
howpublished = {ROBOMECH2020},
language = {English},
url = {https://www.semanticscholar.org/paper/9b0d13ba885ab38b3b4aeb8bce249a259481a8e6},
abstract = {ピンアレイ構成のロボットハンドによる効率的なピッキング作業の実現を提案する},
creationdate = {2022-08-24T15:57:50},
issn = {2424-3124},
journaltitle = {The Proceedings of {JSME} annual Conference on Robotics and Mechatronics
(Robomec)},
number = {0},
pages = {2A2--K10},
publisher = {Japan Society of Mechanical Engineers},
volume = {2020},
year = {2020}
}
[15]Xinyi Zhang, Keisuke Koyama, Wei Wei Wan, Yukiyasu Domae, Kensuke Harada. Motion generation for
separating tangled objects in robotic bin-picking. システム制御情報学会研究発表講演会 (SCI’20), 2020.
▶︎[Bib TeX]
@misc{2020008137,
author = {{Xinyi Zhang, Keisuke Koyama, Wei Wei Wan, Yukiyasu Domae, Kensuke Harada}},
date = {2020-05-23},
title = {Motion Generation for Separating Tangled Objects in Robotic Bin-picking},
howpublished = {システム制御情報学会研究発表講演会 (SCI'20)},
language = {English},
abstract = {絡み合った部品を解く動作の生成手法を提案する。},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[16]Cristian Beltran-Hernandez, Petit Damien, Ixchel RamirezAlpizar, Nishi Takayuki, Kikuchi Shinichi,
Takamitsu Matsubara, Kensuke Harada. Learning contact-rich manipulation tasks with rigid
position-controlled
robots: Learning to force control. International Conference on Intelligent Robots and Systems 2020, 2020.
▶︎[Bib TeX]
@misc{2020008494,
author = {{Cristian Beltran-Hernandez, Petit Damien, Ixchel RamirezAlpizar, Nishi Takayuki, Kikuchi
Shinichi, Takamitsu Matsubara, Kensuke Harada}},
date = {2020-10-26},
title = {Learning Contact-Rich Manipulation Tasks with Rigid Position-Controlled Robots: Learning to
Force Control},
howpublished = {International Conference on Intelligent Robots and Systems 2020},
language = {English},
abstract = {We proposed both a framework for safely training reinforcement learning methods directly
with a rigid position-controlled manipulator and a method for learning force control parameters of
force-feedback control schemes for rigid robots. We evaluate our methods on contact-rich insertion
tasks. Our results show the effectiveness of our methods both in simulation and on a real
robot.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[17]Natsuki Yamanobe. How should robots move to work together with humans? 23rd CISM IFToMM Symposium on
Robot Design, Dynamics and Control, 2020.
▶︎[Bib TeX]
@misc{2020010440,
author = {Natsuki Yamanobe},
date = {2020-09-22},
title = {How should robots move to work together with humans?},
howpublished = {23rd CISM IFToMM Symposium on Robot Design, Dynamics and Control},
language = {English},
abstract = {Recently, co-worker scenarios where robots and humans work together sharing a workspace,
became a topic of great interest also in manufacturing fields. What are the requirements of
co-worker
robots? In addition to the efficiency, robots should behave so as to be perceived safe and
comfortable
by the humans working together with them to ensure their acceptance. In this talk, I would like to
present several works conducted to know what aspects of robot’s behavior affect human’s feelings
from
the viewpoint of common sense, semantics, and more automatic emotions like fear and anxiety. The
framework of a cyber-physical system for human-robot collaboration is also introduced. The whole
situation including environment, humans, and robots is continuously recognized and expressed in a
cyber space, where the next possible situations are simulated, and orders for the robots are planned
for a comfortable collaboration.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[18]高田康太, 山野辺 夏樹, RamirezAlpizar Georgina Ixchel, 小山佳祐, 万偉偉, 原田 研介. レシピを基にしたロボットの双腕による料理作業計画.
第38回日本ロボット学会学術講演会(RSJ2020), 2020.
▶︎[Bib TeX]
@misc{2020010940,
author = {{高田康太, 山野辺 夏樹, RamirezAlpizar Georgina Ixchel, 小山佳祐, 万偉偉, 原田 研介}},
date = {2020-10-10},
title = {レシピを基にしたロボットの双腕による料理作業計画},
howpublished = {第38回日本ロボット学会学術講演会(RSJ2020)},
language = {Japanese},
abstract = {本研究では,料理レシピを基にロボットが料理の作業計画を行う.FOON (Functional Object-Oriented Network) を基にし,レシ
ピに陽に記述されていない情報を自動的に補完するフレームワークにはハンドノードを導入することにより,双腕ロボットが左右の腕を状況に応じて使い分ける作業を計画し,さらに計画された作業を実現するようなロボットの動作を計画する.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[19]世良一成, 山野辺 夏樹, RamirezAlpizar Georgina Ixchel, 小山佳祐, 万偉偉, 原田 研介. イラスト入り組立説明書を用いた組立作業シーケンスグラフの生成.
第38回日本ロボット学会学術講演会(RSJ2020), 2020.
▶︎[Bib TeX]
@misc{2020010941,
author = {{世良一成, 山野辺 夏樹, RamirezAlpizar Georgina Ixchel, 小山佳祐, 万偉偉, 原田 研介}},
date = {2020-10-10},
title = {イラスト入り組立説明書を用いた組立作業シーケンスグラフの生成},
howpublished = {第38回日本ロボット学会学術講演会(RSJ2020)},
language = {Japanese},
abstract = {本研究では組立説明書に描かれた作業手順を説明するイラストの意味理解を行い,組立作業シーケンスグラフ(以降,ATSG: Assembly Task Sequence Graph)
を構築する.ATSG は,一連の組立作業における対象物体の状態変化と,その物体に対する動作を表現する.本研究では,ATSG
を用いることで,説明書から抽出した各部品数,組立順序の情報のみから,不足情報を自動的に補完し,動作とその順序,組立作業による物体状態変化を表現する.これにより,説明書の曖昧な作業指示を具体化し,ロボットでの組立作業を実現する.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[20]山野辺 夏樹. 次世代協働ロボット. インテリジェント実装技術研究会, 2020.
▶︎[Bib TeX]
@misc{2020010971,
author = {山野辺 夏樹},
date = {2020-12-11},
title = {次世代協働ロボット},
howpublished = {インテリジェント実装技術研究会},
language = {Japanese},
abstract =
{人とロボットが同じ空間で協調して働くことは製造業においても期待が大きい。人と共に働く場合、ロボットは効率的であるだけでなく一緒に働く人にとっても快適であるように動作する必要がある。本講演では、ロボットと協働する人の感覚理解や協働システム構築に関する我々の取り組みについて紹介する。},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[21]堂前 幸康. 産業用ロボットにおけるビジョンとAI活用の最前線. 中国地域産総研技術セミナー, 2020.
▶︎[Bib TeX]
@misc{2020011422,
author = {堂前 幸康},
date = {2020-10-28},
title = {産業用ロボットにおけるビジョンと{AI}活用の最前線},
howpublished = {中国地域産総研技術セミナー},
language = {Japanese},
abstract = {産業用ロボット向けのビジョン・AI技術の研究動向と実用課題に対する解決手法例を紹介する。},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[22]津田 浩平, 永田 和之, 西 卓郎, 大西 謙吾. 機械学習 に よ る コンビニ商品 の 検出. 日本機械学会ロボティクス・メカトロニクス講演会2020, 2020.
▶︎[Bib TeX]
@misc{2020011540,
author = {{津田 浩平, 永田 和之, 西 卓郎, 大西 謙吾}},
date = {2020-05-29},
title = {機械学習 に よ る コンビニ商品 の 検出},
howpublished = {日本機械学会ロボティクス・メカトロニクス講演会2020},
language = {Japanese},
abstract =
{本研究では,棚に陳列された商品群の水平方向,垂直方向,奥行方向の整列を基本陳列パターンとして機械学習により認識し,陳列パターンのBBの重なりから個別商品を検出する方法について述べる.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[23]東 和樹, 小山 佳祐, 小澤 隆太, 永田 和之, Wei Wei Wan, 原田 研介. 機能別操作シナジー:対象物操作時の指の機能を考慮したシナジー.
日本機械学会ロボティクス・メカトロニクス講演会2020, 2020.
▶︎[Bib TeX]
@misc{2020011542,
author = {{東 和樹, 小山 佳祐, 小澤 隆太, 永田 和之, Wei Wei Wan, 原田 研介}},
date = {2020-05-29},
title = {機能別操作シナジー:対象物操作時の指の機能を考慮したシナジー},
howpublished = {日本機械学会ロボティクス・メカトロニクス講演会2020},
language = {Japanese},
abstract = {低次元の制御入力によって器用な操作を可能とするシナジー制御手法を提案する},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[24]津田 浩平, 永田 和之, 大西 謙吾. 小規模店舗の商品陳列における異商品混入の検出. 第38回日本ロボット学会学術講演会, 2020.
▶︎[Bib TeX]
@misc{2020011543,
author = {{津田 浩平, 永田 和之, 大西 謙吾}},
date = {2020-10-10},
title = {小規模店舗の商品陳列における異商品混入の検出},
howpublished = {第38回日本ロボット学会学術講演会},
language = {Japanese},
abstract = {本研究では,棚に陳列された商品群の水平方向,垂直方向,奥行方向の整列を基本陳列パターンとし
て機械学習により認識し,陳列BBの交差から個々の商品を検出して,そのテクスチャ解析から異商品の混入を検出する手法について述べる.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[25]樋口 慶, RamirezAlpizar Georgina Ixchel, Venture Gentiane, 山野辺 夏樹. 仮想環境を用いたロボットの視覚運動学習. SICE SI 2020,
2020.
▶︎[Bib TeX]
@misc{2020011822,
author = {{樋口 慶, RamirezAlpizar Georgina Ixchel, Venture Gentiane, 山野辺 夏樹}},
date = {2020-12-16},
title = {仮想環境を用いたロボットの視覚運動学習},
howpublished = {SICE SI 2020},
language = {Japanese},
abstract = {A robot can acquire actions to accomplish a target task by learning from human
manipulations. This work aims to build an end-to-end visuo-motor learning system using VR. We have
built an integrated learning system that reflects the motion learning results in the visual feature
learning model. Experimental results show that our system improved the performance of the tasks
evaluated in an unknown virtual environment.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[26]津田 浩平, 永田 和之, 大西 謙吾. 小規模店舗における商品陳列の乱れ検出. SI2020, 2020.
▶︎[Bib TeX]
@misc{2020012499,
author = {{津田 浩平, 永田 和之, 大西 謙吾}},
date = {2020-12-16},
title = {小規模店舗における商品陳列の乱れ検出},
howpublished = {SI2020},
language = {Japanese},
abstract = {本稿では, 棚に陳列された 商品群の水平方向,垂直方向,奥行方向の 規則的な 整列を基本陳列パターンとして機械学習により認識 し ,その 陳列 Bounding Box
(BB)の交差か ら個々の商品を検出 する.また,検出された個々の商品のテクスチャ解析から異商品の混入,商品の反転を検出する手法について述べる.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[27]Cristian Beltran-Hernandez, Petit Damien, Ixchel RamirezAlpizar, Kensuke Harada. Learning robotic
peg-in-hole with uncertainty goals. 第21回計測自動制御学会システムインテグレーション部門講演会, 2020.
▶︎[Bib TeX]
@misc{2020012547,
author = {{Cristian Beltran-Hernandez, Petit Damien, Ixchel RamirezAlpizar, Kensuke Harada}},
date = {2020-12-18},
title = {Learning Robotic Peg-In-Hole with Uncertainty Goals},
howpublished = {第21回計測自動制御学会システムインテグレーション部門講演会},
language = {English},
abstract = {The main contribution of this work is a learning-based method to solve peg-in-hole tasks
with hole-position uncertainty. We propose the use of an off-policy model-free reinforcement-learning
method, and we bootstrapped the training speed by using several transfer-learning techniques
(sim2real)
and domain randomization.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[28]Xinyi Zhang, Keisuke Koyama, Yukiyasu Domae, Wei Wei Wan, Kensuke Harada. Topology-based grasp
detection
avoiding entanglement for robotic bin-picking. 計測自動制御学会システムインテグレーション部門講演会, 2020.
▶︎[Bib TeX]
@misc{2021004563,
author = {{Xinyi Zhang, Keisuke Koyama, Yukiyasu Domae, Wei Wei Wan, Kensuke Harada}},
date = {2020-12-25},
title = {Topology-based Grasp Detection Avoiding Entanglement for Robotic Bin-picking},
howpublished = {計測自動制御学会システムインテグレーション部門講演会},
language = {English},
abstract = {トポロジーに基づくバラ積み物体の見えの解析と,それに基づく絡む物体の判定手法を提案する.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[29]Jingren Xu, Yukiyasu Domae, Toshio Ueshiba, Wei Wei Wan, Kensuke Harada. Base position planning for a
mobile manipulator to pick-and-transport objects stored in multiple trays.
計測自動制御学会システムインテグレーション部門講演会(SI2020), 2020.
▶︎[Bib TeX]
@misc{2021004564,
author = {{Jingren Xu, Yukiyasu Domae, Toshio Ueshiba, Wei Wei Wan, Kensuke Harada}},
date = {2020-12-15},
title = {Base Position Planning for a Mobile Manipulator to Pick-and-transport Objects Stored in
Multiple Trays},
howpublished = {計測自動制御学会システムインテグレーション部門講演会(SI2020)},
language = {English},
abstract = {複数のトレイに物体が様々な姿勢で投入されている場合,物体を複数操作するためのモバイルマニピュレータの軌道計画方法を提案する.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[30]Jingren Xu, Keisuke Koyama, Weiwei Wan, Yukiyasu Domae, and Kensuke Harada. Designing grippers based
on
model decomposition and primitive fitting. 日本機械学会ロボティクス・メカトロニクス部門講演会(ROBOMEC), 2020.
▶︎[Bib TeX]
@misc{2021004565,
author = {Jingren Xu and Keisuke Koyama and Weiwei Wan and Yukiyasu Domae and Kensuke Harada},
date = {2020-05-27},
title = {Designing Grippers Based on Model Decomposition and Primitive Fitting},
doi = {10.1299/jsmermd.2020.1p1-b01},
howpublished = {日本機械学会ロボティクス・メカトロニクス部門講演会(ROBOMEC)},
language = {English},
url = {https://www.semanticscholar.org/paper/68e5f42cbd5460d694cf5225b12ddee7d3a828d1},
abstract = {複数部品の形状認識に基づいた最適なグリッパ形状の自動デザイン手法を提案する.},
creationdate = {2022-08-24T15:57:50},
issn = {2424-3124},
journaltitle = {The Proceedings of {JSME} annual Conference on Robotics and Mechatronics
(Robomec)},
number = {0},
pages = {1P1--B01},
publisher = {Japan Society of Mechanical Engineers},
volume = {2020},
year = {2020}
}
[31]Cristian Beltran-Hernandez, Petit Damien, Ixchel RamirezAlpizar, Nishi Takayuki, Kikuchi Shinichi,
Takamitsu Matsubara, Kensuke Harada. Learning contact-rich manipulation tasks with rigid
position-controlled
robots: Learning to force control. IEEE Robotics and Automation Letters, 5(4):5709–5716, 2020.
[Bib TeX]
@article{2020008718,
author = {{Cristian Beltran-Hernandez, Petit Damien, Ixchel RamirezAlpizar, Nishi Takayuki, Kikuchi
Shinichi, Takamitsu Matsubara, Kensuke Harada}},
date = {2020-0},
title = {Learning Contact-Rich Manipulation Tasks with Rigid Position-Controlled Robots: Learning to
Force Control},
issn = {2377-3766},
language = {English},
number = {4},
pages = {5709--5716},
volume = {5},
creationdate = {2022-08-24T15:57:50},
journal = {IEEE Robotics and Automation Letters},
publisher = {IEEE-INST ELECTRICAL ELECTRONICS ENGINEERS INC},
year = {2020}
}
[32]Jingren Xu, Kensuke Harada, Weiwei Wan, Toshio Ueshiba, and Yukiyasu Domae. Planning an efficient and
robust base sequence for a mobile manipulator performing multiple pick-and-place tasks. In IEEE
International Conference on Robotics and Automation (ICRA), pages 11018–11024. IEEE, January 2020.
[Bib TeX]
@inproceedings{2021004559,
author = {Jingren Xu and Kensuke Harada and Weiwei Wan and Toshio Ueshiba and Yukiyasu Domae},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
title = {Planning an Efficient and Robust Base Sequence for a Mobile Manipulator Performing Multiple
Pick-and-place Tasks},
year = {2020},
month = jan,
pages = {11018--11024},
publisher = {IEEE},
abstract = {In this paper, we address efficiently and robustly collecting objects stored in
different
trays using a mobile manipulator. A resolution complete method, based on precomputed reachability
database, is proposed to explore collision-free inverse kinematics (IK) solutions and then a
resolution complete set of feasible base positions can be determined. This method approximates a set
of representative IK solutions that are especially helpful when solving IK and checking collision
are
treated separately. For real world applications, we take into account the base positioning
uncertainty
and plan a sequence of base positions that reduce the number of necessary base movements for
collecting the target objects, the base sequence is robust in that the mobile manipulator is able to
complete the part-supply task even there is certain deviation from the planned base positions. Our
experiments demonstrate both the efficiency compared to regular base sequence and the feasibility in
real world applications.},
archiveprefix = {arXiv},
creationdate = {2022-08-24T15:57:50},
date = {2020-6},
doi = {10.1109/ICRA40945.2020.9196999},
eid = {arXiv:2001.08042},
eprint = {2001.08042},
eprinttype = {arXiv},
issn = {1050-4729},
journal = {arXiv e-prints},
keywords = {Computer Science - Robotics},
language = {English},
primaryclass = {cs.RO},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200108042X},
venue = {2020 IEEE International Conference on Robotics and Automation (ICRA)}
}
[33]Xinyi Zhang, Keisuke Koyama, Wei Wei Wan, 堂前 幸康, 原田 研介. Motion generation for separating tangled
objects
in robotic bin-picking. In システム制御情報学会研究発表講演会講演論文集, pages 1176–1179. システム制御情報学会, 2020.
[Bib TeX]
@inproceedings{2021004568,
author = {{Xinyi Zhang, Keisuke Koyama, Wei Wei Wan, 堂前 幸康, 原田 研介}},
booktitle = {システム制御情報学会研究発表講演会講演論文集},
date = {2020-5},
title = {Motion Generation for Separating Tangled Objects in Robotic Bin-picking},
language = {Japanese},
pages = {1176--1179},
publisher = {システム制御情報学会},
abstract = {絡み合った部品を解く動作の生成手法を提案する。},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
[34]Peihao Shi, Zhengtao Hu, Kazuyuki Nagata, Weiwei Wan, Yukiyasu Domae, and Kensuke Harada. An adaptive
pin array fixture to fix multiple parts. In The Proceedings of JSME annual Conference on Robotics and
Mechatronics (Robomec), volume 2020, pages 2A2–K10. The Japan Society of Mechanical Engineers, 2020.
[Bib TeX]
@inproceedings{2021004569,
author = {Peihao Shi and Zhengtao Hu and Kazuyuki Nagata and Weiwei Wan and Yukiyasu Domae and
Kensuke
Harada},
booktitle = {The Proceedings of JSME annual Conference on Robotics and Mechatronics
(Robomec)},
date = {2020-5},
title = {An Adaptive Pin Array Fixture to Fix Multiple Parts},
doi = {10.1299/jsmermd.2020.2a2-k10},
language = {English},
number = {0},
pages = {2A2--K10},
publisher = {The Japan Society of Mechanical Engineers},
url = {https://www.semanticscholar.org/paper/9b0d13ba885ab38b3b4aeb8bce249a259481a8e6},
volume = {2020},
abstract = {ピンアレイ構成のロボットハンドによる効率的なピッキング作業の実現を提案する},
creationdate = {2022-08-24T15:57:50},
issn = {2424-3124},
journaltitle = {The Proceedings of {JSME} annual Conference on Robotics and Mechatronics
(Robomec)},
year = {2020}
}
[35]Jingren Xu, Keisuke Koyama, Weiwei Wan, Yukiyasu Domae, and Kensuke Harada. Designing grippers based
on
model decomposition and primitive fitting. In The Proceedings of JSME annual Conference on Robotics and
Mechatronics (Robomec), volume 2020, pages 1P1–B01. The Japan Society of Mechanical Engineers, 2020.
[Bib TeX]
@inproceedings{2021004570,
author = {Jingren Xu and Keisuke Koyama and Weiwei Wan and Yukiyasu Domae and Kensuke Harada},
booktitle = {The Proceedings of JSME annual Conference on Robotics and Mechatronics
(Robomec)},
date = {2020-5},
title = {Designing Grippers Based on Model Decomposition and Primitive Fitting},
doi = {10.1299/jsmermd.2020.1p1-b01},
language = {English},
number = {0},
pages = {1P1--B01},
publisher = {The Japan Society of Mechanical Engineers},
url = {https://www.semanticscholar.org/paper/68e5f42cbd5460d694cf5225b12ddee7d3a828d1},
volume = {2020},
abstract = {複数部品の形状認識に基づいた最適なグリッパ形状の自動デザイン手法を提案する.},
creationdate = {2022-08-24T15:57:50},
issn = {2424-3124},
journaltitle = {The Proceedings of {JSME} annual Conference on Robotics and Mechatronics
(Robomec)},
year = {2020}
}
[36]Kazuki Higashi, Keisuke Koyama, Ryuta Ozawa, Kazuyuki Nagata, Weiwei Wan, and Kensuke Harada.
Functionally divided manipulation synergy for controlling multi-fingered hands. IEEE/RSJ International
Conference on Intelligent Robots and Systems (IROS2020), March 2020.
[Bib TeX]
@misc{2020011545,
author = {Kazuki Higashi and Keisuke Koyama and Ryuta Ozawa and Kazuyuki Nagata and Weiwei Wan and
Kensuke Harada},
howpublished = {IEEE/RSJ International Conference on Intelligent Robots and Systems
(IROS2020)},
month = mar,
title = {Functionally Divided Manipulation Synergy for Controlling Multi-fingered Hands},
year = {2020},
abstract = {Synergy provides a practical approach for expressing various postures of a
multi-fingered
hand. However, a conventional synergy defined for reproducing grasping postures cannot perform
in-hand
manipulation, e.g., tasks that involve simultaneously grasping and manipulating an object. Locking
the
position of particular fingers of a multi-fingered hand is essential for in-hand manipulation tasks
either to hold an object or to fix unnecessary fingers. When using conventional synergy based
control
to manipulate an object, which requires locking some fingers, the coordination of joints is heavily
restricted, decreasing the dexterity of the hand. We propose a functionally divided manipulation
synergy (FDMS) method, which provides a synergy-based control to achieve both dimensionality
reduction and in-hand manipulation. In FDMS, first, we define the function of each finger of the
hand
as either {"}manipulation{"} or {"}fixed.{"} Then, we apply synergy control only to the fingers
having
the manipulation function, so that dexterous manipulations can be realized with a few control
inputs.
Furthermore, we propose the Synergy Switching Framework as a method for applying a finely defined
FDMS
to sequential task changes. The effectiveness of our method is experimentally verified.},
archiveprefix = {arXiv},
booktitle = {2020 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems
({IROS})},
creationdate = {2022-08-24T15:57:50},
date = {2020-11-25},
doi = {10.1109/IROS45743.2020.9341766},
eid = {arXiv:2003.11699},
eprint = {2003.11699},
eprinttype = {arXiv},
journal = {arXiv e-prints},
keywords = {Computer Science - Robotics},
language = {English},
pages = {arXiv:2003.11699},
primaryclass = {cs.RO},
publisher = {{IEEE}},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200311699H},
venue = {2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}
}
[37]Kazuki Higashi, Keisuke Koyama, Ryuta Ozawa, Kazuyuki Nagata, Weiwei Wan, and Kensuke Harada.
Functionally divided manipulation synergy for controlling multi-fingered hands. In Proc. of the 2020
IEEE/RSJ International Conference on Intelligent Robots and Systems, page arXiv:2003.11699. IEEE/RSJ,
March
2020.
[Bib TeX]
@inproceedings{2020011546,
author = {Kazuki Higashi and Keisuke Koyama and Ryuta Ozawa and Kazuyuki Nagata and Weiwei Wan and
Kensuke Harada},
booktitle = {Proc. of the 2020 IEEE/RSJ International Conference on Intelligent Robots and
Systems},
title = {Functionally Divided Manipulation Synergy for Controlling Multi-fingered Hands},
year = {2020},
month = mar,
pages = {arXiv:2003.11699},
publisher = {IEEE/RSJ},
abstract = {Synergy provides a practical approach for expressing various postures of a
multi-fingered
hand. However, a conventional synergy defined for reproducing grasping postures cannot perform
in-hand
manipulation, e.g., tasks that involve simultaneously grasping and manipulating an object. Locking
the
position of particular fingers of a multi-fingered hand is essential for in-hand manipulation tasks
either to hold an object or to fix unnecessary fingers. When using conventional synergy based
control
to manipulate an object, which requires locking some fingers, the coordination of joints is heavily
restricted, decreasing the dexterity of the hand. We propose a functionally divided manipulation
synergy (FDMS) method, which provides a synergy-based control to achieve both dimensionality
reduction and in-hand manipulation. In FDMS, first, we define the function of each finger of the
hand
as either {"}manipulation{"} or {"}fixed.{"} Then, we apply synergy control only to the fingers
having
the manipulation function, so that dexterous manipulations can be realized with a few control
inputs.
Furthermore, we propose the Synergy Switching Framework as a method for applying a finely defined
FDMS
to sequential task changes. The effectiveness of our method is experimentally verified.},
archiveprefix = {arXiv},
creationdate = {2022-08-24T15:57:50},
date = {2020-10},
doi = {10.1109/IROS45743.2020.9341766},
eid = {arXiv:2003.11699},
eprint = {2003.11699},
eprinttype = {arXiv},
journal = {arXiv e-prints},
keywords = {Computer Science - Robotics},
language = {English},
primaryclass = {cs.RO},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200311699H},
venue = {2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}
}
[38]Cristian C. Beltran-Hernandez, Damien Petit, Ixchel G. Ramirez-Alpizar, and Kensuke Harada. Variable
compliance control for robotic peg-in-hole assembly: A deep reinforcement learning approach. Applied
Sciences-Basel, 10(19):6923, August 2020.
[Bib TeX]
@article{2020010539,
author = {Cristian C. Beltran-Hernandez and Damien Petit and Ixchel G. Ramirez-Alpizar and Kensuke
Harada},
journal = {Applied Sciences-Basel},
title = {Variable Compliance Control for Robotic Peg-in-Hole Assembly: {A} Deep Reinforcement
Learning
Approach},
year = {2020},
issn = {2076-3417},
month = aug,
number = {19},
pages = {6923},
volume = {10},
abstract = {Industrial robot manipulators are playing a more significant role in modern
manufacturing
industries. Though peg-in-hole assembly is a common industrial task which has been extensively
researched, safely solving complex high precision assembly in an unstructured environment remains an
open problem. Reinforcement Learning (RL) methods have been proven successful in solving
manipulation
tasks autonomously. However, RL is still not widely adopted on real robotic systems because working
with real hardware entails additional challenges, especially when using position-controlled
manipulators. The main contribution of this work is a learning-based method to solve peg-in-hole
tasks
with position uncertainty of the hole. We proposed the use of an off-policy model-free
reinforcement
learning method and bootstrap the training speed by using several transfer learning techniques
(sim2real) and domain randomization. Our proposed learning framework for position-controlled robots
was extensively evaluated on contact-rich insertion tasks on a variety of environments.},
archiveprefix = {arXiv},
creationdate = {2022-08-24T15:57:50},
date = {2020-10},
doi = {10.3390/app10196923},
eid = {arXiv:2008.10224},
eprint = {2008.10224},
journaltitle = {Applied Sciences},
keywords = {Computer Science - Robotics, Computer Science - Machine Learning},
language = {English},
primaryclass = {cs.RO},
publisher = {{MDPI} {AG}},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200810224B}
}
[39]I. Ramirez-Alpizar, Ryosuke Hiraki, and K. Harada. Cooking actions inference based on ingredient’s
physical features. IEEE/SICE International Symposium on System Integration (SII 2021), 2021.
[Bib TeX]
@misc{2020010937,
author = {I. Ramirez-Alpizar and Ryosuke Hiraki and K. Harada},
date = {2021-01-12},
title = {Cooking Actions Inference based on Ingredient’s Physical Features},
doi = {10.1109/IEEECONF49454.2021.9382721},
howpublished = {IEEE/SICE International Symposium on System Integration (SII 2021)},
language = {English},
url = {https://www.semanticscholar.org/paper/4269e0ec4e9a9a84105444da4b16969e55d26f6a},
abstract = {Most of the cooking recipes available on the internet describe only major cooking steps,
since the detailed actions are considered to be common knowledge. However, when we want a robot to
cook a meal based on a recipe, we have to give to the robot a step by step plan of each of the tasks
needed to execute one recipe step. In this paper, we developed a framework for inferring the
executable cooking actions of ingredients, in order to compensate for the common knowledge of
humans.
We tuned the existing VGG16 Convolutional Neural Network (CNN) to learn the physical features of
ingredients. Then, we built an inference model for six different cooking actions based on the learnt
physical features of ingredients. The resultant inferred action(s) represents the next possible
action(s) that can be executed. As there can be more than one executable action for the same
ingredient state, we prioritize the cooking actions considering the previously executed action, for
which kind of people the meal is being prepared for and the cooking time allowed. We show
experimental
results on five different types of ingredients that are not contained in the training dataset of the
CNN.},
booktitle = {2021 {IEEE}/{SICE} International Symposium on System Integration ({SII})},
creationdate = {2022-08-24T15:57:50},
publisher = {{IEEE}},
venue = {2021 IEEE/SICE International Symposium on System Integration (SII)},
year = {2021}
}
[40]Akira Nakamura, Natsuki Yamanobe, Ixchel Ramirez Alpizar, Kensuke Harada, and Yukiyasu Domae. Using
various evaluation standards to determine an error recovery process in an automation plant. The 2021
International Conference on Artificial Life and Robotics (ICAROB 2021), 2021.
[Bib TeX]
@misc{2021001001,
author = {Akira Nakamura and Natsuki Yamanobe and Ixchel Ramirez Alpizar and Kensuke Harada and
Yukiyasu Domae},
date = {2021-01-23},
title = {Using Various Evaluation Standards to Determine an Error Recovery Process in an Automation
Plant},
doi = {10.5954/ICAROB.2021.OS11-1},
howpublished = {The 2021 International Conference on Artificial Life and Robotics (ICAROB
2021)},
language = {English},
url = {https://www.semanticscholar.org/paper/007e5b95e03bcf6614960c6bfdbab16854767ac9},
abstract = {In an automated plant, an error is more likely to occur in difficult tasks, which are
complicated in nature. Such a task is often re-executed after returning to the previous step when a
large-scale error occurs. Therefore, deciding both the past step that the task should return to and
the recovery planning following this return becomes important. In this study, error recovery
planning
considering these two factors using various evaluation standards is realized.},
creationdate = {2022-08-24T15:57:50},
issn = {2188-7829},
journaltitle = {Proceedings of International Conference on Artificial Life and Robotics},
pages = {321--327},
publisher = {{ALife} Robotics Corporation Ltd.},
volume = {26},
year = {2021}
}
[41]山野辺 夏樹. ロボットを用いた人・機械協調システムに関する実応用研究. 中長期的なロボット研究に資する技術勉強会・意見交換会, 2021.
[Bib TeX]
@misc{2021002014,
author = {山野辺 夏樹},
date = {2021-02-22},
title = {ロボットを用いた人・機械協調システムに関する実応用研究},
howpublished = {中長期的なロボット研究に資する技術勉強会・意見交換会},
language = {Japanese},
abstract = {人・機械協調システムに関して、人のスキル理解や心理状態に着目した、知能ロボットの研究開発について紹介する},
creationdate = {2022-08-24T15:57:50},
year = {2021}
}
[42]永田 和之. 指の機能的分割に注目した多指ハンドによる物体操作. 第26回ロボティクスシンポジア, 2021.
[Bib TeX]
@misc{2021002049,
author = {永田 和之},
date = {2021-03-17},
title = {指の機能的分割に注目した多指ハンドによる物体操作},
howpublished = {第26回ロボティクスシンポジア},
language = {Japanese},
abstract = {多指ハンドによる作業をプリミティブ動作の組み合せで記述し,他の作業に再利用可能なソフトウェアモジュールに分解するプロセスについて述べる.},
creationdate = {2022-08-24T15:57:50},
year = {2021}
}
[43]Ixchel G. Ramirez-Alpizar, Ryosuke Hiraki, and Kensuke Harada. Cooking actions inference based on
ingredient’s physical features. In IEEE/SICE International Symposium on System Integration (SII 2021),
pages
195–200. IEEE, 2021.
[Bib TeX]
@inproceedings{2020010938,
author = {Ixchel G. Ramirez-Alpizar and Ryosuke Hiraki and Kensuke Harada},
booktitle = {IEEE/SICE International Symposium on System Integration (SII 2021)},
date = {2021-1},
title = {Cooking Actions Inference based on Ingredient’s Physical Features},
doi = {10.1109/IEEECONF49454.2021.9382721},
language = {English},
pages = {195--200},
publisher = {IEEE},
url = {https://www.semanticscholar.org/paper/4269e0ec4e9a9a84105444da4b16969e55d26f6a},
venue = {2021 IEEE/SICE International Symposium on System Integration (SII)},
abstract = {Most of the cooking recipes available on the internet describe only major cooking steps,
since the detailed actions are considered to be common knowledge. However, when we want a robot to
cook a meal based on a recipe, we have to give to the robot a step by step plan of each of the tasks
needed to execute one recipe step. In this paper, we developed a framework for inferring the
executable cooking actions of ingredients, in order to compensate for the common knowledge of
humans.
We tuned the existing VGG16 Convolutional Neural Network (CNN) to learn the physical features of
ingredients. Then, we built an inference model for six different cooking actions based on the learnt
physical features of ingredients. The resultant inferred action(s) represents the next possible
action(s) that can be executed. As there can be more than one executable action for the same
ingredient state, we prioritize the cooking actions considering the previously executed action, for
which kind of people the meal is being prepared for and the cooking time allowed. We show
experimental
results on five different types of ingredients that are not contained in the training dataset of the
CNN.},
creationdate = {2022-08-24T15:57:50},
year = {2021}
}
[44]Akira Nakamura, Natsuki Yamanobe, Ixchel Ramirez Alpizar, Kensuke Harada, and Yukiyasu Domae. Using
various evaluation standards to determine an error recovery process in an automation plant. In PROCEEDINGS
OF THE 2021 INTERNATIONAL CONFERENCE ON ARTIFICIAL LIFE AND ROBOTICS, volume 26, pages 321–327.
ALife
Robotics Corporation Ltd., 2021.
[Bib TeX]
@inproceedings{2021001015,
author = {Akira Nakamura and Natsuki Yamanobe and Ixchel Ramirez Alpizar and Kensuke Harada and
Yukiyasu Domae},
booktitle = {PROCEEDINGS OF THE 2021 INTERNATIONAL CONFERENCE ON ARTIFICIAL LIFE AND
ROBOTICS},
date = {2021-1},
title = {Using Various Evaluation Standards to Determine an Error Recovery Process in an Automation
Plant},
doi = {10.5954/ICAROB.2021.OS11-1},
language = {English},
pages = {321--327},
publisher = {{ALife} Robotics Corporation Ltd.},
url = {https://www.semanticscholar.org/paper/007e5b95e03bcf6614960c6bfdbab16854767ac9},
volume = {26},
abstract = {In an automated plant, an error is more likely to occur in difficult tasks, which are
complicated in nature. Such a task is often re-executed after returning to the previous step when a
large-scale error occurs. Therefore, deciding both the past step that the task should return to and
the recovery planning following this return becomes important. In this study, error recovery
planning
considering these two factors using various evaluation standards is realized.},
creationdate = {2022-08-24T15:57:50},
issn = {2188-7829},
journaltitle = {Proceedings of International Conference on Artificial Life and Robotics},
year = {2021}
}
[45]Jingren Xu, Weiwei Wan, Keisuke Koyama, Yukiyasu Domae, and Kensuke Harada. Selecting and designing
grippers for an assembly task in a structured approach. ADVANCED ROBOTICS, 35(6):381–397, March
2021.
[Bib TeX]
@article{2020009089,
author = {Jingren Xu and Weiwei Wan and Keisuke Koyama and Yukiyasu Domae and Kensuke Harada},
journal = {ADVANCED ROBOTICS},
title = {Selecting and Designing Grippers for an Assembly Task in a Structured Approach},
year = {2021},
issn = {0169-1864},
month = mar,
number = {6},
pages = {381--397},
volume = {35},
abstract = {In this paper, we present a structured approach to selecting and designing a
set
of grippers for an assembly task. Compared to current experience-based gripper design method, our
approach accelerates the design process by automatically generating a set of initial design options
for gripper types and parameters according to the CAD models of assembly components. We use mesh
segmentation techniques to segment the assembly components and fit the segmented parts with shape
primitives, according to the predefined correspondence between shape primitive and gripper type,
suitable gripper types and parameters can be selected and extracted from the fitted shape
primitives.
Moreover, we incorporate the assembly constraints in the further evaluation of the initially
obtained
gripper types and parameters. Considering the affordance of the segmented parts and the collision
avoidance between the gripper and the subassemblies, applicable gripper types and parameters can be
filtered from the initial options. Among the applicable gripper configurations, we further optimize
the number of grippers for performing the assembly task, by exploring the gripper that is able to
handle multiple assembly components during the assembly. Finally, the feasibility of the designed
grippers is experimentally verified by assembling a part of an industrial product.},
archiveprefix = {arXiv},
creationdate = {2022-08-24T15:57:50},
date = {2021-1},
doi = {10.1080/01691864.2020.1870047},
eid = {arXiv:2003.04087},
eprint = {2003.04087},
eprinttype = {arXiv},
keywords = {Computer Science - Robotics},
language = {English},
primaryclass = {cs.RO},
publisher = {TAYLOR & FRANCIS LTD},
url = {https://ui.adsabs.harvard.edu/abs/2020arXiv200304087X},
venue = {Adv. Robotics}
}
2019年度以前
[1]Kousuke Mano, Takahiro Hasegawa, Takayoshi Yamashita, Hironobu Fujiyoshi, and Yukiyasu Domae. Fast and
precise detection of object grasping positions with eigenvalue templates. IEEE International Conference on
Robotics and Automation(ICRA) 2019, 2019.
[Bib TeX]
@misc{2019001193,
author = {Kousuke Mano and Takahiro Hasegawa and Takayoshi Yamashita and Hironobu Fujiyoshi and
Yukiyasu Domae},
date = {2019-05-20},
title = {Fast and Precise Detection of Object Grasping Positions with Eigenvalue Templates},
doi = {10.1109/ICRA.2019.8793830},
howpublished = {IEEE International Conference on Robotics and Automation(ICRA) 2019},
language = {English},
url = {https://www.semanticscholar.org/paper/4b84e7308d7165a821fd984fe55e841274e73431},
abstract = {we propose a method in which hand templates are represented in compact form for faster
processing by using singular value decomposition. Applying singular value decomposition enables hand
templates to be represented as linear combinations of a small number of eigenvalue templates and
eigenfunctions. Experimental results show that the proposed method reduces computation time by two
thirds while maintaining the same detection accuracy as conventional FGE for both parallel hands and
three-finger hands.},
booktitle = {2019 International Conference on Robotics and Automation ({ICRA})},
creationdate = {2022-08-24T14:40:39},
publisher = {{IEEE}},
venue = {2019 International Conference on Robotics and Automation (ICRA)},
year = {2019}
}
[2]Ryo Matsumura, Yukiyasu Domae, Wei Wei Wan, Kensuke Harada. Learning based robotic bin-picking for
potentially tangled objects. IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)
2019, 2019.
[Bib TeX]
@misc{2019003295,
author = {{Ryo Matsumura, Yukiyasu Domae, Wei Wei Wan, Kensuke Harada}},
howpublished = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)
2019},
title = {Learning Based Robotic Bin-picking for Potentially Tangled Objects},
year = {2019},
abstract = {In this research, we tackle the challenge of picking only one object from a randomly
stacked pile where the objects can potentially be tangled. No solution has been proposed to solve
this
challenge due to the complexity of picking one and only one object from the bin of tangled objects.
Therefore, we propose a method for avoiding the situation where a robot picks multiple objects. In
our
proposed method, first, grasping candidates poses are computed using the graspability index. Then, a
Convolutional Neural Network (CNN) is trained to predict whether or not the robot can pick one and
only one object from the bin. Additionally, since a physics simulator is used to collect data to
train
the CNN, an automatic picking system can be built. The effectiveness of the proposed method is
confirmed through experiments on robot Nextage and compared with previous bin-picking
methods.},
creationdate = {2022-08-24T14:40:39},
date = {2019-11-03},
doi = {10.1109/IROS40897.2019.8968295},
language = {English},
url = {https://www.semanticscholar.org/paper/ad43f5518f0c243e0dd60cc5c3f5da1a1d19588a},
venue = {2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}
}
[3]西 卓郎, 朝岡 忠, 永田 和之, 堂前 幸康. 陳列物体操作のための配置パターンを考慮した三次元位置姿勢推定手法. 第22回 画像の認識・理解シンポジウム (MIRU2019), 2019.
[Bib TeX]
@misc{2019010098,
author = {{西 卓郎, 朝岡 忠, 永田 和之, 堂前 幸康}},
date = {2019-07-29},
title = {陳列物体操作のための配置パターンを考慮した三次元位置姿勢推定手法},
howpublished = {第22回 画像の認識・理解シンポジウム (MIRU2019)},
language = {Japanese},
abstract = {本報では店舗などに陳列された物品を効率よく操作するための配置パターンを考慮した三次元位置姿勢推定手法について述べるとともに,多種多様な物品操作の実験施設として2019 年5
月に産総研臨海副都心センター内に開設した小規模店舗模擬環境を紹介する.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[4]朝岡 忠, 永田 和之, 西 卓郎. 物品群の配列パターンと隙間が把持戦略に及ぼす影響. 日本ロボット学会学術講演会, 2019.
[Bib TeX]
@misc{2019011476,
author = {{朝岡 忠, 永田 和之, 西 卓郎}},
date = {2019-09-04},
title = {物品群の配列パターンと隙間が把持戦略に及ぼす影響},
howpublished = {日本ロボット学会学術講演会},
language = {Japanese},
abstract =
{コンビニ店舗内の商品は規則的な配列パターンで陳列されている.本研究は,規則的な配列パターンで陳列されている物品の把持戦略に関するものである.本発表では,配列された物品間の隙間が把持戦略に与える影響について人間工学実験により調べた.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[5]Ixchel RamirezAlpizar. Towards an efficient framework for generating robotic assembly motions. IEEE/RSJ
International Conference on Intelligent Robots and Systems, 2019.
[Bib TeX]
@misc{2019016977,
author = {Ixchel RamirezAlpizar},
date = {2019-11-04},
title = {Towards an efficient framework for generating robotic assembly motions},
howpublished = {IEEE/RSJ International Conference on Intelligent Robots and Systems},
language = {English},
abstract = {Among the tasks that remain done by humans, assembly tasks are particularly difficult to
execute by a robot. We propose two different methods for generating robotic assembly
motions.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[6]長濱星斗, RamirezAlpizar Ixchel, 万偉偉, 原田 研介. ロボットによる調理のための食材配置生成. 第20回計測自動制御学会システムインテグレーション部門講演会, 2019.
[Bib TeX]
@misc{2019018601,
author = {{長濱星斗, RamirezAlpizar Ixchel, 万偉偉, 原田 研介}},
date = {2019-12-14},
title = {ロボットによる調理のための食材配置生成},
howpublished = {第20回計測自動制御学会システムインテグレーション部門講演会},
language = {Japanese},
abstract = {In this research, we propose a food arrangement method used for cooking robots. We first
generate a scoring method of arranged food by using Amazon Mechanical Turk. Then, we use a machine
learning technique to generate food arrangement with high score. As an example of food arrangement,
we consider the fruits arrangement on a white round cake. We verified that our method can generate
fruits arrangements with high score.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[7]速水友輔, 石 培昊, RamirezAlpizar Ixchel, 万 偉偉, 原田 研介. スナップアセンブリにおける決定木を用いた複数種類のエラーパターン識別.
第20回計測自動制御学会システムインテグレーション部門講演会, 2019.
[Bib TeX]
@misc{2019019598,
author = {{速水友輔, 石 培昊, RamirezAlpizar Ixchel, 万 偉偉, 原田 研介}},
date = {2019-12-14},
title = {スナップアセンブリにおける決定木を用いた複数種類のエラーパターン識別},
howpublished = {第20回計測自動制御学会システムインテグレーション部門講演会},
language = {Japanese},
abstract = {We propose a novel error identification method during robotic snap assembly aiming at
automated recovery from error states. In the proposed method, we first obtain the feature quantities
of
force/torque by using fPCA. Then, we classify these data into successful and several different error
states considering the multi-dimensional feature of the force/torque signal.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[8]Xinyi Zhang, Damien Petit, Yukiyasu Domae, Ixchel RamirezAlpizar, Wei Wei Wan, Kensuke Harada. Error
analysis and adjustment on randomized bin-picking. 第20回計測自動制御学会システムインテグレーション部門講演会, 2019.
[Bib TeX]
@misc{2019019601,
author = {{Xinyi Zhang, Damien Petit, Yukiyasu Domae, Ixchel RamirezAlpizar, Wei Wei Wan, Kensuke
Harada}},
date = {2019-12-14},
title = {Error Analysis and Adjustment on Randomized Bin-picking},
howpublished = {第20回計測自動制御学会システムインテグレーション部門講演会},
language = {English},
abstract = {In this paper, we propose an automatic method for robotic calibration while the robot is
completing a bin-picking task. Our proposed method can achieve real-time hand-eye calibration by
adjusting the errors of transformation between these two frames during a bin-picking task.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[9]Cristian Beltran, Damien Petit, Ixchel RamirezAlpizar, Takamitsu Matsubara, Kensuke Harada. Hybrid
position-force control with reinforcement learning. 第20回計測自動制御学会システムインテグレーション部門講演会, 2019.
[Bib TeX]
@misc{2019019605,
author = {{Cristian Beltran, Damien Petit, Ixchel RamirezAlpizar, Takamitsu Matsubara, Kensuke
Harada}},
date = {2019-12-14},
title = {Hybrid position-force control with reinforcement learning},
howpublished = {第20回計測自動制御学会システムインテグレーション部門講演会},
language = {English},
abstract = {We propose a robotic assembly framework for learning a Reinforcement Learning (RL)
policy
to solve manipulation tasks, which require handling complex contact dynamics with a
position-controlled robot and a Force-Torque sensor. A traditional force controller is combined with
an RL policy to achieve parallel hybrid position-force control.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[10]佐藤優也, RamirezAlpizar Ixchel, 酒田信親, 万 偉偉, 原田 研介. テンプレートを利用した光沢物体の姿勢推定. 第37回日本ロボット学会学術講演会, 2019.
[Bib TeX]
@misc{2019019642,
author = {{佐藤優也, RamirezAlpizar Ixchel, 酒田信親, 万 偉偉, 原田 研介}},
date = {2019-09-04},
title = {テンプレートを利用した光沢物体の姿勢推定},
howpublished = {第37回日本ロボット学会学術講演会},
language = {Japanese},
abstract = {本研究では,対象物の見え方を利用した姿勢推定を行うためにテンプレートマッチングによる姿勢推定を行う.このような問題に対して,敵対的生成ネットワーク(GAN)
を用いてシミュレーション画像から実世界の画像と類似した画像の生成を考える.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[11]Cristian Beltran, Damien Petit, Ixchel RamirezAlpizar, Takamitsu Matsubara, Kensuke Harada.
Reinforcement learning framework for real-world robotic arm. 第37回日本ロボット学会学術講演会, 2019.
[Bib TeX]
@misc{2019019646,
author = {{Cristian Beltran, Damien Petit, Ixchel RamirezAlpizar, Takamitsu Matsubara, Kensuke
Harada}},
date = {2019-09-04},
title = {Reinforcement Learning Framework for Real-World Robotic Arm},
howpublished = {第37回日本ロボット学会学術講演会},
language = {English},
abstract = {The goal of this work is to develop a reinforcement learning framework that works with a
real-world robotic arm. We use the algorithm called Guided Policy Search which enables robots to
learn
a control policy efficiently; less interaction is required to learn a complex manipulation task
compared to other RL methods.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[12]Xinyi Zhang, Damien Petit, Yukiyasu Domae, Ixchel RamirezAlpizar, Wei Wei Wan, Kensuke Harada. A
real-time robotic calibration method for vision-based bin-picking. ロボティクス・メカトロニクス 講演会 2019, 2019.
[Bib TeX]
@misc{2019019648,
author = {{Xinyi Zhang, Damien Petit, Yukiyasu Domae, Ixchel RamirezAlpizar, Wei Wei Wan, Kensuke
Harada}},
date = {2019-06-06},
title = {A Real-time Robotic Calibration Method for Vision-Based Bin-Picking},
howpublished = {ロボティクス・メカトロニクス 講演会 2019},
language = {English},
abstract = {This research proposed an automatic approach for robotic calibration while the robot is
completing the bin-picking task. Since the performance of vision-based bin-picking task can be
affected by calibration inaccuracies or weak scene processing, we proposed a method to do
calibration
during the bin-picking task in real time.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[13]坂本匠, 原田 研介, 万 偉偉, RamirezAlpizar Ixchel. パレタイジングの軌道計画におけるR-PRM(re-usable probabilistic roadmap) 手法.
ロボティクス・メカトロニクス 講演会 2019, 2019.
[Bib TeX]
@misc{2019019649,
author = {{坂本匠, 原田 研介, 万 偉偉, RamirezAlpizar Ixchel}},
date = {2019-06-07},
title = {パレタイジングの軌道計画における{R}-{PRM}(Re-usable Probabilistic Roadmap) 手法},
howpublished = {ロボティクス・メカトロニクス 講演会 2019},
language = {Japanese},
abstract = {This paper proposes a method for trajectory planning for palletizing tasks. Focusing on
the feature of palletizing task where the start and the goal configurations are similar in most of
the
cases, we consider re-using and slightly modifying the previously constructed roadmap when planning
the robot's trajectory.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[14]平木亮輔, RamirezAlpizar Ixchel, 原田 研介. 食材の物理的特徴の学習に基づく実行可能な調理動作の推定. ロボティクス・メカトロニクス 講演会 2019, 2019.
[Bib TeX]
@misc{2019019650,
author = {{平木亮輔, RamirezAlpizar Ixchel, 原田 研介}},
date = {2019-06-06},
title = {食材の物理的特徴の学習に基づく実行可能な調理動作の推定},
howpublished = {ロボティクス・メカトロニクス 講演会 2019},
language = {Japanese},
abstract = {In many cooking recipes existing on the Web, only general cooking actions are written,
however, for a robot to actually cook, we need to instruct the robot in more detail. As a method of
automatically planning such detailed actions, this paper proposes a method to estimate physical
properties such as shape and size from food images by using CNN and estimate cooking actions based
on
these properties.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[15]堂前 幸康. 機械学習によるロボットマニピュレーション. 秋田県ロボット技術研究会, 2019.
[Bib TeX]
@misc{2019019920,
author = {堂前 幸康},
date = {2019-11-22},
title = {機械学習によるロボットマニピュレーション},
howpublished = {秋田県ロボット技術研究会},
language = {Japanese},
abstract = {ロボットマニピュレーションの現状と、機械学習に基づく応用手法の紹介をおこなう。},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[16]Yukiyasu Domae. AI-based manipulation. ITRI-AIST workshop, 2019.
[Bib TeX]
@misc{2019019921,
author = {Yukiyasu Domae},
date = {2019-11-09},
title = {{AI}-based Manipulation},
howpublished = {ITRI-AIST workshop},
language = {English},
abstract = {ロボットマニピュレーションへの機械学習技術の応用事例を紹介する。},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[17]Yukiyasu Domae. Robotic picking for various objects. UK-Japan Robotics and AI Research Collaboration
Workshop, 2019.
[Bib TeX]
@misc{2019019922,
author = {Yukiyasu Domae},
date = {2019-09-18},
title = {Robotic Picking for Various Objects},
howpublished = {UK-Japan Robotics and AI Research Collaboration Workshop},
language = {English},
abstract = {産業用ロボット、機械学習に関するロボットマニピュレーションの研究事例を紹介する。},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[18]Kazuki Higashi, Ryuta Ozawa, Kazuyuki Nagata, Wei Wei Wan, Kensuke Harada. Synergy-based control for
multi-fingered hands using selected joint spaces. IEEE/RSJ International Conference on Intelligent Robots
and Systems (IROS 2019), 2019.
[Bib TeX]
@misc{2020001719,
author = {{Kazuki Higashi, Ryuta Ozawa, Kazuyuki Nagata, Wei Wei Wan, Kensuke Harada}},
date = {2019-11-05},
title = {Synergy-based Control for Multi-fingered Hands Using Selected Joint Spaces},
howpublished = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS
2019)},
language = {English},
abstract = {This paper proposes subsynergy which provides a synergy-based control method for
multi-fingered hands under selected joint spaces. Subsynergy is a synergy composed of subsets of
fingers or joints needed for performing a specific task. By using subsynergy, we can perform several
different dexterous tasks by controlling a high-DOF multi-fingered hand with lower dimensional inputs
compared to conventional synergies.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[19]山野辺 夏樹. 複雑作業の自動化に向けた力制御. ロボット工学セミナー 第120回 ロボットのための作業・動作計画とその教示支援技術, 2019.
[Bib TeX]
@misc{2020004183,
author = {山野辺 夏樹},
date = {2019-06-27},
title = {複雑作業の自動化に向けた力制御},
howpublished = {ロボット工学セミナー 第120回 ロボットのための作業・動作計画とその教示支援技術},
language = {Japanese},
abstract = {ものを操作したり, 環境に働きかけたり, 人は視覚情報に加えて力覚情報を上手く利用して作業を行っています. 力を制御することは,
器用なマニピュレーションを実現するためには必要不可欠であり, 古くから研究が進められてきました.しかしながら, 実現したい作業に対してどのように力制御を用いるかが大きな課題であり,
まだ実用例が少ないのが現状です. 本講演では, 実現したい作業に合わせたコンプライアンス設定等の制御戦略構築を中心に, マニピュレーション分野における力制御研究について概説します.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[20]堂前 幸康, 原田 研介, Wei Wei Wan, 真野 航輔, 花井 亮, 山野辺 夏樹, RamirezAlpizar Georgina Ixchel, 植芝 俊夫, 高瀬 竜一, 藤吉弘亘.
製造現場でのロボットの自律的な作業を実現するAI技術を開発. 次世代人工知能・ロボット中核技術開発,NEDO, 東京, 2019.
[Bib TeX]
@unpublished{2019012787,
author = {{堂前 幸康, 原田 研介, Wei Wei Wan, 真野 航輔, 花井 亮, 山野辺 夏樹, RamirezAlpizar Georgina Ixchel, 植芝 俊夫, 高瀬
竜一, 藤吉弘亘}},
date = {2019-08-29},
title = {製造現場でのロボットの自律的な作業を実現する{AI}技術を開発},
language = {Any},
note = {次世代人工知能・ロボット中核技術開発,NEDO, 東京},
abstract = {国立研究開発法人 産業技術総合研究所【理事長 中鉢 良治】(以下「産総研」という)人工知能研究センター【センター長 辻井 潤一】は、国立大学法人
大阪大学【総長 西尾 章治郎】(以下「阪大」という)、学校法人中部大学【理事長・総長 飯吉 厚夫】(以下「中部大」という)と共同で、国立研究開発法人 新エネルギー・産業技術総合開発機構【理事長 石塚
博昭】(以下「NEDO」という)「次世代人工知能・ロボット中核技術開発」プロジェクトにおいて、自動化が困難な製造現場での作業である部品供給と組み立て作業へのロボット導入を容易にするAI技術を開発した。この技術には①絡み合う部品の供給技術、②道具を使う組み立て作業の計画技術、③視覚に基づく作業の高速化技術を含む。2019年8月29日より順次、本開発成果のソフトウエアを公開する。これらの技術を基に、複雑な作業工程による生産ラインの設計の効率化と作業時間の短縮を図る。},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[21]Kousuke Mano, Takahiro Hasegawa, Takayoshi Yamashita, Hironobu Fujiyoshi, and Yukiyasu Domae. Fast and
precise detection of object grasping positions with eigenvalue templates. In IEEE International Conference
on Robotics and Automation. IEEE, 2019.
[Bib TeX]
@inproceedings{2019001194,
author = {Kousuke Mano and Takahiro Hasegawa and Takayoshi Yamashita and Hironobu Fujiyoshi and
Yukiyasu Domae},
booktitle = {IEEE International Conference on Robotics and Automation},
date = {2019-05},
title = {Fast and Precise Detection of Object Grasping Positions with Eigenvalue Templates},
doi = {10.1109/ICRA.2019.8793830},
language = {English},
publisher = {IEEE},
url = {https://www.semanticscholar.org/paper/4b84e7308d7165a821fd984fe55e841274e73431},
venue = {2019 International Conference on Robotics and Automation (ICRA)},
abstract = {we propose a method in which hand templates are represented in compact form for faster
processing by using singular value decomposition. Applying singular value decomposition enables hand
templates to be represented as linear combinations of a small number of eigenvalue templates and
eigenfunctions. Experimental results show that the proposed method reduces computation time by two
thirds while maintaining the same detection accuracy as conventional FGE for both parallel hands and
three-finger hands.},
creationdate = {2022-08-24T14:40:39},
issn = {1050-4729},
year = {2019}
}
[22]東 和樹, 小澤 隆太, 永田 和之, 万 偉偉, 原田 研介. タスク指向形ソフトウェアシナジーを実現する多指ハンド制御システム. システム制御情報学会論文誌, 32(5):218–226,
2019.
[Bib TeX]
@article{2019001317,
author = {{東 和樹, 小澤 隆太, 永田 和之, 万 偉偉, 原田 研介}},
date = {2019-5},
title = {タスク指向形ソフトウェアシナジーを実現する多指ハンド制御システム},
language = {Japanese},
number = {5},
pages = {218--226},
volume = {32},
creationdate = {2022-08-24T14:40:39},
journal = {システム制御情報学会論文誌},
publisher = {システム制御情報学会},
year = {2019}
}
[23]西 卓郎, 増田 健, 喜多 泰代, 佐藤 雄隆. ホビー用RGB-Dカメラの産業用途適用のためのビジョン技術とその限界. In 画像ラボ, volume 30, pages 1–11.
日本工業出版, 2019.
[Bib TeX]
@incollection{2019001386,
author = {{西 卓郎, 増田 健, 喜多 泰代, 佐藤 雄隆}},
booktitle = {画像ラボ},
date = {2019-4},
title = {ホビー用{RGB}-{D}カメラの産業用途適用のためのビジョン技術とその限界},
language = {Japanese},
number = {4},
pages = {1--11},
publisher = {日本工業出版},
volume = {30},
abstract =
{3Dセンサとしてホビー用として普及している安価なRGB-Dカメラを採用した低コストなバラ積み部品組付けシステムの構築を試み,±1mm程度の要求精度を満たすことのできるセンサ校正手法および位置決め手法を開発した.
また評価実験を通して,対象部品,組み付け手順の物理的,光学的特性と,システムを構成する各種センサおよびマニピュレータの特性を相互に一致させることがコスト低減において重要であることを明らかにした.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[24]Masahiro Fujita, Yukiyasu Domae, Ryosuke Kawanishi, Kenta Kato, Koji Shiratsuchi, Rintaro Haraguchi,
Ryosuke Araki, Hironobu Fujiyoshi, Shuichi Akizuki, Manabu Hashimoto, Gustavo A. Garcia, Haruhisa Okuda,
Akio Noda, Tsukasa Ogasawara. Bin-picking robot using a multi-gripper switching strategy based on object
sparseness. In Proceedings of International Conference on Automation Science and Engineering, pages
1540–1547. IEEE, 2019.
[Bib TeX]
@inproceedings{2019002142,
author = {{Masahiro Fujita, Yukiyasu Domae, Ryosuke Kawanishi, Kenta Kato, Koji Shiratsuchi, Rintaro
Haraguchi, Ryosuke Araki, Hironobu Fujiyoshi, Shuichi Akizuki, Manabu Hashimoto, Gustavo A. Garcia,
Haruhisa Okuda, Akio Noda, Tsukasa Ogasawara}},
booktitle = {Proceedings of International Conference on Automation Science and Engineering},
date = {2019-8},
title = {Bin-picking Robot using a Multi-gripper Switching Strategy based on Object
Sparseness},
language = {English},
pages = {1540--1547},
publisher = {IEEE},
abstract = {We propose a bin-picking robot system for handling a mixture of daily items. Three
different
types of gripper are used in the system: vacuum, suction, and two-finger. We also propose a strategy
for gripper combination in bin-picking. The vacuum and suction grippers are used at the early stage
of
picking, while the suction is changed to the two-finger gripper at the final stage. The robot system
performed well in the stowing task of the Amazon Robotics Challenge 2017. The system could pick
18/20
items and obtained the 3rd place in the stowing task.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[25]福本 靖彦, 山野辺 夏樹, 万 偉偉, 原田 研介. ヒトの作業データにおける相関に基づくロボットの力制御設計. 日本機械学会論文集, 85(874), 2019.
[Bib TeX]
@article{2019010400,
author = {{福本 靖彦, 山野辺 夏樹, 万 偉偉, 原田 研介}},
date = {2019-6},
title = {ヒトの作業データにおける相関に基づくロボットの力制御設計},
language = {Japanese},
number = {874},
volume = {85},
creationdate = {2022-08-24T14:40:39},
journal = {日本機械学会論文集},
publisher = {日本機械学会},
year = {2019}
}
[26]原田 研介, 万 偉偉, RamirezAlpizar Ixchel, 山野辺 夏樹, 辻徳生. データベースに基づくロボットの作業動作計画. In ロボット学会誌, volume 37, pages
679–682. ロボット学会, 2019.
[Bib TeX]
@incollection{2019012124,
author = {{原田 研介, 万 偉偉, RamirezAlpizar Ixchel, 山野辺 夏樹, 辻徳生}},
booktitle = {ロボット学会誌},
date = {2019-0},
title = {データベースに基づくロボットの作業動作計画},
language = {Japanese},
number = {8},
pages = {679--682},
publisher = {ロボット学会},
volume = {37},
abstract =
{NEDO次世代人工知能・ロボット中核技術開発における,ロボットの作業動作生成に関する取り組みについて紹介する.ヒトが手を用いて知的に作業を行うことができるのは,過去に同様な,あるいは類似した作業を行った記憶を呼び起こし,呼び起こした手の動作を適応的に修正するメカニズムに起因すると考え,ロボットの動作に関するデータを何等かの形でデータベースに蓄積し,必要なときにダウンロードするフレームワークを構築している.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[27]Damien Petit, Ixchel G. Ramirez-Alpizar, Wataru Kamei, Qiming He, and Kensuke Harada. Realizing an
assembly task through virtual capture. In IEEE International Conference on Systems, Man and Cybernetics,
pages 2651–2656. IEEE, 2019.
[Bib TeX]
@inproceedings{2019012128,
author = {Damien Petit and Ixchel G. Ramirez-Alpizar and Wataru Kamei and Qiming He and Kensuke
Harada},
booktitle = {IEEE International Conference on Systems, Man and Cybernetics},
date = {2019-10},
title = {Realizing an assembly task through virtual capture},
doi = {10.1109/SMC.2019.8914044},
language = {English},
pages = {2651--2656},
publisher = {IEEE},
url = {https://www.semanticscholar.org/paper/25ca6e5a428789cbec10ac5bdf6b0c19955dc9b0},
venue = {2019 IEEE International Conference on Systems, Man and Cybernetics (SMC)},
abstract = {In this paper we present a method where the motion and grasping adaptation to robot
motion
is tackled during the motion capture using a virtual environment.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[28]M. Fujita, Y. Domae, A. Noda, G. A. Garcia Ricardez, T. Nagatani, A. Zeng, S. Song, A. Rodriguez,
A. Causo, I. M. Chen, and T. Ogasawara. What are the important technologies for bin picking? technology
analysis of robots in competitions based on a set of performance metrics. ADVANCED ROBOTICS,
34(7-8):560–674, 2019.
[Bib TeX]
@article{2019019910,
author = {M. Fujita and Y. Domae and A. Noda and G. A. Garcia Ricardez and T. Nagatani and A. Zeng
and
S. Song and A. Rodriguez and A. Causo and I. M. Chen and T. Ogasawara},
date = {2019-2},
journaltitle = {Advanced Robotics},
title = {What are the Important Technologies for Bin Picking? Technology Analysis of Robots in
Competitions based on a Set of Performance Metrics},
doi = {10.1080/01691864.2019.1698463},
issn = {0169-1864},
language = {English},
number = {7-8},
pages = {560--674},
url = {https://www.semanticscholar.org/paper/88e66cd1b4d0d533b4f65e0ddd45f793532cc7ad},
volume = {34},
abstract = {Bin picking is still a challenge in robotics, as patent in recent robot
competitions. These competitions are an excellent platform for technology comparisons since some
participants may use state-of-the-art technologies, while others may use conventional ones.
Nevertheless, even though points are awarded or subtracted based on the performance in the frame of
the competition rules, the final score does not directly reflect the suitability of the technology.
Therefore, it is difficult to understand which technologies and their combination are optimal for
various real-world problems. In this paper, we propose a set of performance metrics selected in
terms
of actual field use as a solution to clarify the important technologies in bin picking. Moreover, we
use the selected metrics to compare our four original robot systems, which achieved the best
performance in the Stow task of the Amazon Robotics Challenge 2017. Based on this comparison, we
discuss which technologies are ideal for practical use in bin picking robots in the fields of
factory
and warehouse automation.},
creationdate = {2022-08-24T14:40:39},
journal = {ADVANCED ROBOTICS},
publisher = {TAYLOR & FRANCIS LTD},
venue = {Adv. Robotics},
year = {2019}
}
[29]堂前 幸康, 多田 充徳, 谷川 民生. サイバーフィジカルシステムと人・機械協調. In 日本ロボット学会誌, volume 37, pages 683–686. 日本ロボット学会,
2019.
[Bib TeX]
@incollection{2019019913,
author = {{堂前 幸康, 多田 充徳, 谷川 民生}},
booktitle = {日本ロボット学会誌},
date = {2019-0},
title = {サイバーフィジカルシステムと人・機械協調},
language = {Japanese},
number = {8},
pages = {683--686},
publisher = {日本ロボット学会},
volume = {37},
abstract = {This article introduces global trends in cyber-physical systems and the outlook for future research at AIST's new Cyber-Physical Systems Research Building.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[30]堂前 幸康. 「産業用ロボットのIoT化」特集について. In 日本ロボット学会誌, volume 37, pages 683–686. 日本ロボット学会, 2019.
▶︎[Bib TeX]
@incollection{2019019914,
author = {堂前 幸康},
booktitle = {日本ロボット学会誌},
date = {2019},
title = {「産業用ロボットのIo{T}化」特集について},
language = {Japanese},
number = {8},
pages = {683--686},
publisher = {日本ロボット学会},
volume = {37},
abstract = {This article introduces the background and contents of the special issue on the ongoing adoption of IoT in industrial robots.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
[31]佐藤優也, 原田 研介, 万偉偉, 酒田信親, RamirezAlpizar Ixchel. バラ積みされた難識別対象物に対する2段階ピッキング手法. In 画像ラボ, volume 30, pages
1–10. 日本工業出版K.K., 2019.
▶︎[Bib TeX]
@incollection{2020000894,
author = {佐藤優也 and 原田 研介 and 万偉偉 and 酒田信親 and RamirezAlpizar Ixchel},
booktitle = {画像ラボ},
date = {2019-2},
title = {バラ積みされた難識別対象物に対する2段階ピッキング手法},
language = {Japanese},
number = {12},
pages = {1--10},
publisher = {日本工業出版K.K.},
volume = {30},
abstract = {Rather than picking directly from a randomly stacked pile, this study proposes a two-stage method that coarsely grasps several objects from the pile, places them on a work table, and then picks a single target object from the table. We show that, with only a few objects on the table, two-dimensional RGB information can be acquired stably and that picking based on this information is possible.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
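As a rough illustration of the two-stage strategy summarized in the abstract of [31], the sketch below separates a coarse multi-object transfer from the pile to a work table from a single-object pick off the table. All perception and grasping functions are placeholders, not the authors' implementation.
```python
# Hypothetical outline of a two-stage picking pipeline: stage 1 coarsely
# grasps several items from the bin onto a table, stage 2 picks a single
# item from the table using easily obtained 2-D RGB detections.
import random

def coarse_multi_grasp_from_pile(pile):
    """Stage 1: scoop a few items from the bin onto the table (placeholder)."""
    grabbed = random.sample(pile, min(3, len(pile)))
    for item in grabbed:
        pile.remove(item)
    return grabbed

def detect_items_rgb(table_items):
    """Stage 2 perception: with few items on a flat table, a plain RGB
    detector is assumed to identify each item reliably (placeholder)."""
    return list(table_items)

def pick_one_from_table(table_items, target):
    table_items.remove(target)
    return target

def two_stage_pick(pile):
    table, picked = [], []
    while pile or table:
        if not table:
            table.extend(coarse_multi_grasp_from_pile(pile))
        detections = detect_items_rgb(table)
        picked.append(pick_one_from_table(table, detections[0]))
    return picked

if __name__ == "__main__":
    print(two_stage_pick([f"part_{i}" for i in range(7)]))
```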
[32]Kazuki Higashi, Ryuta Ozawa, Kazuyuki Nagata, Wei Wei Wan, Kensuke Harada. Synergy-based control for
multi-fingered hands using selected joint spaces. In Proc. of the IEEE/RSJ International Conference on
Intelligent Robots and Systems 2019. IEEE, 2019.
▶︎[Bib TeX]
@inproceedings{2020001720,
author = {Kazuki Higashi and Ryuta Ozawa and Kazuyuki Nagata and Wei Wei Wan and Kensuke Harada},
booktitle = {Proc. of the IEEE/RSJ International Conference on Intelligent Robots and Systems
2019},
date = {2019-1},
title = {Synergy-based Control for Multi-fingered Hands Using Selected Joint Spaces},
language = {English},
publisher = {IEEE},
abstract = {This paper proposes subsynergy, which provides a synergy-based control method for multi-fingered hands under selected joint spaces. A subsynergy is a synergy composed of the subsets of fingers or joints needed for performing a specific task. By using subsynergies, we can perform several different dexterous tasks by controlling a high-DOF multi-fingered hand with lower-dimensional inputs compared to conventional synergies.},
creationdate = {2022-08-24T14:40:39},
year = {2019}
}
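The subsynergy idea in [32] can be loosely illustrated with PCA restricted to a task-relevant joint subset; the sketch below uses random stand-in data and an arbitrary joint subset, and is not the controller proposed in the paper.
```python
# Illustrative sketch: extract a "subsynergy" basis by applying PCA only
# to the joints involved in a given task, then map a low-dimensional
# input back to joint commands for that subset.
import numpy as np

def subsynergy_basis(grasp_postures, joint_subset, n_synergies=2):
    """grasp_postures: (N, J) recorded hand joint angles.
    joint_subset: indices of the joints used by the task."""
    X = grasp_postures[:, joint_subset]
    mean = X.mean(axis=0)
    # principal directions of the selected joint subspace
    _, _, vt = np.linalg.svd(X - mean, full_matrices=False)
    return mean, vt[:n_synergies]           # shapes (S,) and (n_syn, S)

def synergy_to_joints(z, mean, basis, joint_subset, n_joints):
    q = np.zeros(n_joints)
    q[joint_subset] = mean + z @ basis       # low-dim input -> joint subset
    return q

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    postures = rng.normal(size=(50, 16))     # e.g. a 16-DOF hand
    subset = [0, 1, 2, 3, 4, 5]              # joints of two fingers, say
    mean, basis = subsynergy_basis(postures, subset)
    q_cmd = synergy_to_joints(np.array([0.3, -0.1]), mean, basis, subset, 16)
    print(q_cmd.round(3))
```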
[33]Masahiro Fujita, Yukiyasu Domae, Ryosuke Kawanishi, Gustavo Alfonso Garcia Ricardez, Kenta Kato, Koji
Shiratsuchi, Rintaro Haraguchi, Ryosuke Araki, Hironobu Fujiyoshi, Shuichi Akizuki, Manabu Hashimoto,
Albert
Causo, Akio Noda, Haruhisa Okuda, and Tsukasa Ogasawara. Bin-picking robot using a multi-gripper switching
strategy based on object sparseness. IEEE International Conference on Automation Science and Engineering,
August 2019.
▶︎[Bib TeX]
@misc{2019002141,
author = {Masahiro Fujita and Yukiyasu Domae and Ryosuke Kawanishi and Gustavo Alfonso Garcia
Ricardez
and Kenta Kato and Koji Shiratsuchi and Rintaro Haraguchi and Ryosuke Araki and Hironobu Fujiyoshi
and
Shuichi Akizuki and Manabu Hashimoto and Albert Causo and Akio Noda and Haruhisa Okuda and Tsukasa
Ogasawara},
howpublished = {IEEE International Conference on Automation Science and Engineering},
month = aug,
title = {Bin-picking Robot using a Multi-gripper Switching Strategy based on Object
Sparseness},
year = {2019},
abstract = {Bin picking of various daily items is an important research problem in robotics. If the
target items are diverse, multiple grippers are normally used. A design of gripper combinations
depends not only on the item variations but also on the state of the bins, which changes while
robots
pick items from them. In this paper, we propose a gripper combination strategy to change the gripper
combination during a bin-picking task based on the sparseness of objects inside bins. As an
experiment, we build a robot system which has three different types of grippers. By using the
proposed
combination strategy, the system effectively changed the gripper combination during the task, and
picked 18/20 items to obtain the 3rd place in the Stow task at the Amazon Robotics Challenge 2017.
The
successful picking rate and Mean Picks Per Hour (MPPH) were higher than the 1st place team. In this
paper, we describe the problem, method to switch the combination, system including gripper design
and
recognition algorithm, and experimental results from the competition.},
booktitle = {2019 {IEEE} 15th International Conference on Automation Science and Engineering
({CASE})},
creationdate = {2022-08-24T14:40:39},
date = {2019-08-22},
doi = {10.1109/COASE.2019.8842977},
language = {English},
publisher = {{IEEE}},
url = {https://www.semanticscholar.org/paper/e47a38c28b55318122fe012b9dc7f0165a392cad},
venue = {2019 IEEE 15th International Conference on Automation Science and Engineering (CASE)}
}
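To make the gripper-switching idea in [33] concrete, the sketch below selects a gripper from an object-sparseness measure. Both the sparseness definition (mean nearest-neighbour distance of detected object centroids) and the thresholds are assumptions for illustration, not values from the paper.
```python
# Illustrative sketch of switching grippers by an object-sparseness measure.
import numpy as np

def sparseness(centroids_xy):
    """Mean distance from each detected object to its nearest neighbour."""
    c = np.asarray(centroids_xy)
    if len(c) < 2:
        return np.inf
    d = np.linalg.norm(c[:, None, :] - c[None, :, :], axis=-1)
    np.fill_diagonal(d, np.inf)
    return d.min(axis=1).mean()

def choose_gripper(centroids_xy, dense_thresh=0.03, sparse_thresh=0.08):
    s = sparseness(centroids_xy)
    if s < dense_thresh:
        return "suction"       # cluttered bin: slim suction cup fits between items
    elif s < sparse_thresh:
        return "two-finger"    # moderate clutter
    return "multi-finger"      # isolated items: a larger gripper is acceptable

if __name__ == "__main__":
    cluttered = np.random.default_rng(1).uniform(0, 0.1, size=(20, 2))
    print(choose_gripper(cluttered))
```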
[34]Ryo Matsumura, Yukiyasu Domae, Weiwei Wan, and Kensuke Harada. Learning based robotic bin-picking for
potentially tangled objects. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems
(IROS), pages 7984–7991. IEEE, November 2019.
▶︎[Bib TeX]
@inproceedings{2019003296,
author = {Ryo Matsumura and Yukiyasu Domae and Weiwei Wan and Kensuke Harada},
booktitle = {2019 {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems
({IROS})},
title = {Learning Based Robotic Bin-picking for Potentially Tangled Objects},
year = {2019},
month = nov,
pages = {7984--7991},
publisher = {IEEE},
abstract = {In this research, we tackle the challenge of picking only one object from a randomly stacked pile where the objects can potentially be tangled. No solution has been proposed to solve this challenge due to the complexity of picking one and only one object from a bin of tangled objects. Therefore, we propose a method for avoiding the situation where a robot picks multiple objects. In our proposed method, first, grasping candidate poses are computed using the graspability index. Then, a Convolutional Neural Network (CNN) is trained to predict whether or not the robot can pick one and only one object from the bin. Additionally, since a physics simulator is used to collect data to train the CNN, an automatic picking system can be built. The effectiveness of the proposed method is confirmed through experiments on the robot Nextage and a comparison with previous bin-picking methods.},
creationdate = {2022-08-24T14:40:39},
date = {2019-11},
doi = {10.1109/IROS40897.2019.8968295},
language = {English},
url = {https://www.semanticscholar.org/paper/ad43f5518f0c243e0dd60cc5c3f5da1a1d19588a},
venue = {2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}
}
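A minimal sketch of the learning stage described in [34], assuming PyTorch: a small CNN classifies a depth patch around a grasp candidate as a clean single-object pick or a potentially tangled pick. The architecture, input size, and label definition are illustrative assumptions, not the network used in the paper.
```python
# Minimal sketch (assumes PyTorch): classify depth crops around grasp
# candidates as {multiple/tangled, single object}.
import torch
import torch.nn as nn

class SinglePickClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
        )
        self.head = nn.Sequential(
            nn.Flatten(),
            nn.Linear(32 * 16 * 16, 64), nn.ReLU(),
            nn.Linear(64, 2),              # {multiple/tangled, single object}
        )

    def forward(self, depth_patch):        # (B, 1, 64, 64) depth crops
        return self.head(self.features(depth_patch))

if __name__ == "__main__":
    model = SinglePickClassifier()
    patch = torch.randn(4, 1, 64, 64)      # crops around grasp candidates
    scores = model(patch)                  # rank candidates by P(single pick)
    print(torch.softmax(scores, dim=1))
```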
[35]堂前 幸康, 原田 研介. 次世代人工知能技術を搭載したロボット. NEDO AI&ROBOT NEXT シンポジウム, 2020.
▶︎[Bib TeX]
@misc{2020000954,
author = {堂前 幸康 and 原田 研介},
date = {2020-01-17},
title = {次世代人工知能技術を搭載したロボット},
howpublished = {NEDO AI&ROBOT NEXT シンポジウム},
language = {Japanese},
abstract = {This presentation introduces the AI technology outcomes for industrial robots developed in the NEDO project "Next-Generation Artificial Intelligence and Robot Core Technology Development".},
creationdate = {2022-08-24T14:40:39},
year = {2020}
}
[36]津田 浩平, 永田 和之, 西 卓郎, 大西 謙吾. 機械学習によるコンビニ商品陳列パターン認識. 自律分散システム・シンポジウム, 2020.
▶︎[Bib TeX]
@misc{2020001172,
author = {津田 浩平 and 永田 和之 and 西 卓郎 and 大西 謙吾},
date = {2020-01-25},
title = {機械学習によるコンビニ商品陳列パターン認識},
howpublished = {自律分散システム・シンポジウム},
language = {Japanese},
abstract = {Recognition of product display patterns is important for automating tasks in convenience stores. This study reports on learning and recognizing the display patterns of convenience store products using machine learning.},
creationdate = {2022-08-24T14:40:39},
year = {2020}
}
[37]Akira Nakamura, Natsuki Yamanobe, RamirezAlpizar Georgina Ixchel, Kensuke Harada, Yukiyasu Domae.
Cost-oriented planning for error recovery in an automation plant. The 2020 International Conference on
Artificial Life and Robotics (ICAROB 2020), 2020.
▶︎[Bib TeX]
@misc{2020001229,
author = {Akira Nakamura and Natsuki Yamanobe and RamirezAlpizar Georgina Ixchel and Kensuke Harada and Yukiyasu Domae},
date = {2020-01-15},
title = {Cost-oriented Planning for Error Recovery in an Automation Plant},
howpublished = {The 2020 International Conference on Artificial Life and Robotics (ICAROB
2020)},
language = {English},
abstract = {In an automation plant, errors are more likely to occur during complicated tasks. In the
case of a major error, the task is commonly re-executed after returning to the previous step.
Therefore, deciding both the prior step that should be returned to and the recovery approach after
return are important problems to consider. In this paper, cost-oriented planning of error recovery
taking these two factors into account is proposed.},
creationdate = {2022-08-24T14:40:39},
year = {2020}
}
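The cost-oriented recovery planning described in [37] can be illustrated with a toy cost model: choose the step to return to by minimizing the return cost plus the expected cost of re-execution. The model and the numbers below are assumptions for illustration only, not those of the paper.
```python
# Hypothetical illustration of cost-oriented error recovery planning.
def expected_recovery_cost(return_cost, reexec_cost, success_prob):
    # Simple model: re-execution may fail and be retried, so its expected
    # cost is reexec_cost / success_prob.
    return return_cost + reexec_cost / success_prob

def best_return_step(candidates):
    """candidates: {step_name: (return_cost, reexec_cost, success_prob)}"""
    return min(candidates, key=lambda s: expected_recovery_cost(*candidates[s]))

if __name__ == "__main__":
    candidates = {
        "re-grasp part":     (2.0, 5.0, 0.90),
        "return to fixture": (4.0, 3.0, 0.95),
        "restart from bin":  (8.0, 1.0, 0.99),
    }
    step = best_return_step(candidates)
    print(step, round(expected_recovery_cost(*candidates[step]), 2))
```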
[38]堂前 幸康, 川西亮輔, 白土浩司, 原口林太郎, 藤田正弘, 藤吉弘亘, 山内悠嗣, 山下隆義, 橋本学, 秋月秀一.
挟持グリッパをベースとした混載商品を識別するピッキングロボットの開発. 日本ロボット学会誌, 38(1):1–9, 2020.
▶︎[Bib TeX]
@article{2018011940,
author = {堂前 幸康 and 川西亮輔 and 白土浩司 and 原口林太郎 and 藤田正弘 and 藤吉弘亘 and 山内悠嗣 and 山下隆義 and 橋本学 and 秋月秀一},
date = {2020-1},
title = {挟持グリッパをベースとした混載商品を識別するピッキングロボットの開発},
issn = {0289-1824},
language = {Japanese},
number = {1},
pages = {1--9},
volume = {38},
creationdate = {2022-08-24T14:40:39},
journal = {日本ロボット学会誌},
publisher = {日本ロボット学会},
year = {2020}
}
[39]Kosuke Fukuda, Natsuki Yamanobe, Ixchel G. Ramirez-Alpizar, and Kensuke Harada. Assembly motion
recognition framework using only images. In IEEE/SICE International Symposium on System Integration, pages
1242–1247. IEEE, 2020.
▶︎[Bib TeX]
@inproceedings{2019016889,
author = {Kosuke Fukuda and Natsuki Yamanobe and Ixchel G. Ramirez-Alpizar and Kensuke
Harada},
booktitle = {IEEE/SICE International Symposium on System Integration},
date = {2020-1},
title = {Assembly Motion Recognition Framework Using Only Images},
doi = {10.1109/SII46433.2020.9026247},
language = {English},
pages = {1242--1247},
publisher = {IEEE},
url = {https://www.semanticscholar.org/paper/65ed353d439fca380f9214b81b6aac71e3502de5},
venue = {2020 IEEE/SICE International Symposium on System Integration (SII)},
abstract = {This work proposes a method for recognizing and segmenting assembly tasks into single
motions. We carry out the motion recognition of the segmented motion data by using several Hidden
Markov Models (HMMs) that represent the actions that can be executed with the manipulated
object(s).},
creationdate = {2022-08-24T14:40:39},
year = {2020}
}
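In the spirit of the HMM-based recognition described in [39], the sketch below fits one Gaussian HMM per motion class to segmented feature sequences and labels a new segment by the highest log-likelihood. The use of hmmlearn and the random stand-in features are assumptions; this is not the authors' framework.
```python
# Sketch: per-motion-class Gaussian HMMs, classification by log-likelihood.
import numpy as np
from hmmlearn import hmm

def train_motion_models(segments_by_label, n_states=3):
    models = {}
    for label, segments in segments_by_label.items():
        X = np.vstack(segments)                 # stacked feature frames
        lengths = [len(s) for s in segments]    # per-segment lengths
        m = hmm.GaussianHMM(n_components=n_states, covariance_type="diag",
                            n_iter=50, random_state=0)
        m.fit(X, lengths)
        models[label] = m
    return models

def classify(models, segment):
    return max(models, key=lambda label: models[label].score(segment))

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = {
        "insert": [rng.normal(0.0, 1.0, size=(30, 4)) for _ in range(5)],
        "screw":  [rng.normal(2.0, 1.0, size=(30, 4)) for _ in range(5)],
    }
    models = train_motion_models(data)
    print(classify(models, rng.normal(2.0, 1.0, size=(30, 4))))
```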
[40]Kaidi Nie, Felix von Drigalski, Joshua C. Triyonoputro, Chisato Nakashima, Yoshiya Shibata, Yoshinori
Konishi, Yoshihisa Ijiri, Taku Yoshioka, Yukiyasu Domae, Toshio Ueshiba, Ryuichi Takase, Xinyi Zhang,
Damien
Petit, Ixchel G. Ramirez-Alpizar, Weiwei Wan, and Kensuke Harada. Team O2AS' approach for the
task-board task of the world robot challenge 2018. ADVANCED ROBOTICS, 34(7-8):477–498, 2020.
▶︎[Bib TeX]
@article{2020000965,
author = {Kaidi Nie and Felix von Drigalski and Joshua C. Triyonoputro and Chisato Nakashima and
Yoshiya Shibata and Yoshinori Konishi and Yoshihisa Ijiri and Taku Yoshioka and Yukiyasu Domae and
Toshio Ueshiba and Ryuichi Takase and Xinyi Zhang and Damien Petit and Ixchel G. Ramirez-Alpizar and
Weiwei Wan and Kensuke Harada},
date = {2020-3},
journaltitle = {Advanced Robotics},
title = {Team {O2AS}' Approach for the Task-board Task of the World Robot Challenge 2018},
doi = {10.1080/01691864.2020.1738270},
issn = {0169-1864},
language = {English},
number = {7-8},
pages = {477--498},
volume = {34},
creationdate = {2022-08-24T14:40:39},
journal = {ADVANCED ROBOTICS},
publisher = {TAYLOR & FRANCIS LTD},
year = {2020}
}
[41]Akira Nakamura, Natsuki Yamanobe, Ixchel Ramirez Alpizar, Kensuke Harada, and Yukiyasu Domae.
Cost-oriented planning for error recovery in an automation plant. Journal of Robotics, Networking and
Artificial Life, 6(4):225–230, 2020.
▶︎[Bib TeX]
@article{2020002570,
author = {Akira Nakamura and Natsuki Yamanobe and Ixchel Ramirez Alpizar and Kensuke Harada and
Yukiyasu Domae},
journal = {Journal of Robotics, Networking and Artificial Life},
title = {Cost-oriented Planning for Error Recovery in an Automation Plant},
year = {2020},
issn = {2405-9021},
number = {4},
pages = {225--230},
volume = {6},
creationdate = {2022-08-24T14:40:39},
date = {2020-3},
doi = {10.2991/jrnal.k.200222.004},
language = {English},
publisher = {{ALife} Robotics Corporation Ltd.}
}
[42]Jingren Xu, Yukiyasu Domae, Toshio Ueshiba, Wei Wei Wan, Kensuke Harada. Base position planning for a
mobile manipulator to pick-and-transport objects stored in multiple trays.
計測自動制御学会システムインテグレーション部門講演会(SI2020), 2020.
▶︎[Bib TeX]
@misc{2021004564,
author = {Jingren Xu and Yukiyasu Domae and Toshio Ueshiba and Wei Wei Wan and Kensuke Harada},
date = {2020-12-15},
title = {Base Position Planning for a Mobile Manipulator to Pick-and-transport Objects Stored in
Multiple Trays},
howpublished = {計測自動制御学会システムインテグレーション部門講演会(SI2020)},
language = {English},
abstract = {We propose a trajectory planning method for a mobile manipulator to manipulate multiple objects when the objects are placed in various poses in multiple trays.},
creationdate = {2022-08-24T15:57:50},
year = {2020}
}
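As a toy illustration of base-position planning for multiple trays as in [42], the sketch below greedily selects base poses so that every tray centre falls within an assumed arm reach, minimizing the number of base relocations. The reach value, candidate poses, and greedy strategy are assumptions for illustration, not the planning method of the paper.
```python
# Toy sketch: greedy base-position selection to cover several trays.
import numpy as np

def plan_base_positions(tray_centres, candidate_bases, reach=0.8):
    trays = {tuple(t) for t in tray_centres}
    plan = []
    while trays:
        # pick the candidate base that covers the most remaining trays
        best = max(
            candidate_bases,
            key=lambda b: sum(np.linalg.norm(np.subtract(t, b)) <= reach
                              for t in trays),
        )
        covered = {t for t in trays
                   if np.linalg.norm(np.subtract(t, best)) <= reach}
        if not covered:
            raise ValueError("some trays are unreachable from any candidate base")
        plan.append((tuple(best), covered))
        trays -= covered
    return plan

if __name__ == "__main__":
    trays = [(0.0, 0.5), (0.3, 0.6), (1.5, 0.5), (1.8, 0.4)]
    bases = [(0.1, 0.0), (1.0, 0.0), (1.6, 0.0)]
    for base, covered in plan_base_positions(trays, bases):
        print(base, "->", sorted(covered))
```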