@InProceedings{yoshiyasu:accv:2018,
  author    = {Yoshiyasu, Yusuke and Sagawa, Ryusuke and Ayusawa, Ko and Murai, Akihiko},
  title     = {Skeleton Transformer Networks: {3D} Human Pose and Skinned Mesh from a Single {RGB} Image},
  booktitle = {Asian Conference on Computer Vision},
  year      = {2018},
  address   = {Perth, Australia},
  month     = {December 2--6},
  url       = {https://www.researchgate.net/publication/330036375_Skeleton_Transformer_Networks_3D_Human_Pose_and_Skinned_Mesh_from_Single_RGB_Image},
  keywords  = {Convolutional neural networks, 3D human pose, Skeleton},
  doi       = {10.1007/978-3-030-20870-7_30},
  abstract  = {In this paper, we present Skeleton Transformer Networks (SkeletonNet), an end-to-end framework that predicts not only the 3D joint positions but also the 3D angular pose (bone rotations) of a human skeleton from a single color image, which in turn allows us to generate skinned mesh animations. We propose a two-step regression approach. The first step regresses bone rotations to obtain an initial solution that takes the skeleton structure into account. The second step refines this solution with a heatmap regressor using a 3D pose representation called cross heatmap, which stacks heatmaps of the xy and zy coordinates. By training the network on the proposed 3D human pose dataset, which consists of images annotated with 3D skeletal angular poses, we show that SkeletonNet can predict a full 3D human pose (joint positions and bone rotations) from a single in-the-wild image.}
}