@InProceedings{yoshiyasu:eurographics:2020,
  author    = {Yoshiyasu, Yusuke and Gamez, Lucas},
  title     = {Learning Body Shape and Pose from Dense Correspondences},
  booktitle = {Conference of the European Association for Computer Graphics},
  year      = {2020},
  address   = {Norrk{\"o}ping, Sweden},
  month     = {May 25--29},
  note      = {ONLINE},
  url       = {https://diglib.eg.org/bitstream/handle/10.2312/egs20201012/037-040.pdf?sequence=1\&isAllowed=y},
  doi       = {10.2312/egs.20201012},
  abstract  = {In this paper, we address the problem of learning 3D human pose and body shape from 2D image datasets, without using 3D supervision (body shape and pose), which is difficult to obtain in practice. The idea is to use dense correspondences between image points and a body surface, which can be annotated on in-the-wild 2D images, to extract, aggregate and learn 3D information such as body shape and pose. To do so, we propose a training strategy called "deform-and-learn", in which we alternate between deformable surface registration and training of deep convolutional neural networks (ConvNets). Experimental results show that our method is comparable to previous semi-supervised techniques that use 3D supervision.}
}