@article{M8F5EF4E2,
  title    = {Grasping a Target Object in Clutter with an Anthropomorphic Robot Hand via {RGB-D} Vision Intelligence, Target Path Planning and Deep Reinforcement Learning},
  author   = {Ryu, Ga Hyeon and Oh, Ji-Heon and Jeong, Jin Gyun and Jung, Hwanseok and Lee, Jin Hyuk and Rivera Lopez, Patricio and Kim, Tae-Seong},
  journal  = {KIPS Transactions on Software and Data Engineering},
  year     = {2022},
  issn     = {2287-5905},
  doi      = {10.3745/KTSDE.2022.11.9.363},
  keywords = {Anthropomorphic Robot Hand, Reinforcement Learning, Path Planning, Object Detection},
  abstract = {Grasping a target object among clutter objects without collision requires machine intelligence. Machine intelligence includes environment recognition, target \& obstacle recognition, collision-free path planning, and object grasping intelligence of robot hands. In this work, we implement such a system in simulation and hardware to grasp a target object without collision. We use a RGB-D image sensor to recognize the environment and objects. Various path-finding algorithms have been implemented and tested to find collision-free paths. Finally for an anthropomorphic robot hand, object grasping intelligence is learned through deep reinforcement learning. In our simulation environment, grasping a target out of five clutter objects, showed an average success rate of 78.8\% and a collision rate of 34\% without path planning. Whereas our system combined with path planning showed an average success rate of 94\% and an average collision rate of 20\%. In our hardware environment grasping a target out of three clutter objects showed an average success rate of 30\% and a collision rate of 97\% without path planning whereas our system combined with path planning showed an average success rate of 90\% and an average collision rate of 23\%. Our results show that grasping a target object in clutter is feasible with vision intelligence, path planning, and deep RL.},
}