Commit 90f6a005 authored by Nikhila Ravi, committed by Facebook GitHub Bot

Tutorials textures updates and fix bug in extending meshes with uv textures

Summary:
Found a bug in extending textures with vertex uv coordinates, caused by the padded -> list conversion of the uv coordinates: the number of vertices in the mesh and the number of rows in verts_uvs can differ (e.g. a vertex shared between 3 faces can have up to 3 different uv coordinates), so we cannot convert directly from padded to list using _num_verts_per_mesh.

Reviewed By: bottler

Differential Revision: D23233595

fbshipit-source-id: 0c66d15baae697ead0bdc384f74c27d4c6539fc9
parent d3307658
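To make the failure mode concrete, here is a minimal sketch (not part of this commit) of a mesh whose shared vertices carry more than one uv coordinate, i.e. len(verts_uvs) != len(verts). The tensors are made up for illustration, and the shapes in the comments assume the TexturesUV / Meshes.extend behaviour after this fix:

```python
import torch

from pytorch3d.renderer import TexturesUV
from pytorch3d.structures import Meshes

# A square made of 2 triangular faces over 4 vertices...
verts = torch.tensor(
    [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
)
faces = torch.tensor([[0, 1, 2], [0, 2, 3]], dtype=torch.int64)

# ...but with 6 uv rows: the two vertices shared by both faces get a different
# uv in each face, so verts_uvs has more rows than the mesh has vertices.
verts_uvs = torch.tensor(
    [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.1, 0.1], [0.9, 0.9], [0.0, 1.0]]
)
faces_uvs = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.int64)
texture_map = torch.rand(1, 8, 8, 3)  # dummy (N, H, W, 3) texture image

tex = TexturesUV(
    maps=texture_map, faces_uvs=faces_uvs[None], verts_uvs=verts_uvs[None]
)
mesh = Meshes(verts=[verts], faces=[faces], textures=tex)

# Before this fix, the Meshes constructor overrode the texture's per-mesh vertex
# counts with the mesh's own counts (4 here), so extending the mesh split the
# padded verts_uvs by those counts and dropped uv rows. With the unbind-based
# conversion, each extended mesh keeps all 6 uv rows.
extended = mesh.extend(2)
print([uv.shape for uv in extended.textures.verts_uvs_list()])
# -> [torch.Size([6, 2]), torch.Size([6, 2])]
```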
docs/tutorials/fit_textured_mesh.ipynb

The notebook was regenerated in this commit, so the whole file shows as changed. Relative to the previous revision, the tutorial imports `TexturesVertex` instead of `Textures`, builds the dataset renderer with `SoftPhongShader` in place of `TexturedSoftPhongShader`, uses `OpenGLPerspectiveCameras` in place of `FoVPerspectiveCameras`, and the notebook metadata (kernelspec, cell ordering) was rewritten. The updated notebook reads as follows.

    # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

# Fit a mesh via rendering

This tutorial shows how to:
- Load a mesh and textures from an `.obj` file.
- Create a synthetic dataset by rendering a textured mesh from multiple viewpoints
- Fit a mesh to the observed synthetic images using differential silhouette rendering
- Fit a mesh and its textures using differential textured rendering

## 0. Install and Import modules

If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:

    !pip install torch torchvision
    !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'

    import os
    import torch
    import matplotlib.pyplot as plt
    from skimage.io import imread

    from pytorch3d.utils import ico_sphere
    import numpy as np
    from tqdm.notebook import tqdm

    # Util function for loading meshes
    from pytorch3d.io import load_objs_as_meshes, save_obj

    from pytorch3d.loss import (
        chamfer_distance,
        mesh_edge_loss,
        mesh_laplacian_smoothing,
        mesh_normal_consistency,
    )

    # Data structures and functions for rendering
    from pytorch3d.structures import Meshes
    from pytorch3d.renderer import (
        look_at_view_transform,
        OpenGLPerspectiveCameras,
        PointLights,
        DirectionalLights,
        Materials,
        RasterizationSettings,
        MeshRenderer,
        MeshRasterizer,
        SoftPhongShader,
        SoftSilhouetteShader,
        TexturesVertex
    )

    # add path for demo utils functions
    import sys
    import os
    sys.path.append(os.path.abspath(''))

If using **Google Colab**, fetch the utils file for plotting image grids:

    !wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py
    from plot_image_grid import image_grid

OR if running **locally** uncomment and run the following cell:

    # from utils.plot_image_grid import image_grid

### 1. Load a mesh and texture file

Load an `.obj` file and its associated `.mtl` file and create a **Textures** and **Meshes** object.

**Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes.

**TexturesVertex** is an auxiliary datastructure for storing vertex rgb texture information about meshes.

**Meshes** has several class methods which are used throughout the rendering pipeline.

If running this notebook using **Google Colab**, run the following cell to fetch the mesh obj and texture files and save them at the path `data/cow_mesh`. If running locally, the data is already available at the correct path.

    !mkdir -p data/cow_mesh
    !wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.obj
    !wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.mtl
    !wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow_texture.png

    # Setup
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")

    # Set paths
    DATA_DIR = "./data"
    obj_filename = os.path.join(DATA_DIR, "cow_mesh/cow.obj")

    # Load obj file
    mesh = load_objs_as_meshes([obj_filename], device=device)

    # We scale normalize and center the target mesh to fit in a sphere of radius 1
    # centered at (0,0,0). (scale, center) will be used to bring the predicted mesh
    # to its original center and scale. Note that normalizing the target mesh
    # speeds up the optimization but is not necessary!
    verts = mesh.verts_packed()
    N = verts.shape[0]
    center = verts.mean(0)
    scale = max((verts - center).abs().max(0)[0])
    mesh.offset_verts_(-center.expand(N, 3))
    mesh.scale_verts_((1.0 / float(scale)));

## 2. Dataset Creation

We sample different camera positions that encode multiple viewpoints of the cow. We create a renderer with a shader that performs texture map interpolation. We render a synthetic dataset of images of the textured cow mesh from multiple viewpoints.

    # the number of different viewpoints from which we want to render the mesh.
    num_views = 20

    # Get a batch of viewing angles.
    elev = torch.linspace(0, 360, num_views)
    azim = torch.linspace(-180, 180, num_views)

    # Place a point light in front of the object. As mentioned above, the front of
    # the cow is facing the -z direction.
    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

    # Initialize an OpenGL perspective camera that represents a batch of different
    # viewing angles. All the cameras helper methods support mixed type inputs and
    # broadcasting. So we can view the camera from a distance of dist=2.7, and
    # then specify elevation and azimuth angles for each viewpoint as tensors.
    R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # We arbitrarily choose one particular view that will be used to visualize
    # results
    camera = OpenGLPerspectiveCameras(device=device, R=R[None, 1, ...],
                                      T=T[None, 1, ...])

    # Define the settings for rasterization and shading. Here we set the output
    # image to be of size 128X128. As we are rendering images for visualization
    # purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to
    # rasterize_meshes.py for explanations of these parameters. We also leave
    # bin_size and max_faces_per_bin to their default values of None, which sets
    # their values using heuristics and ensures that the faster coarse-to-fine
    # rasterization method is used. Refer to docs/notes/renderer.md for an
    # explanation of the difference between naive and coarse-to-fine rasterization.
    raster_settings = RasterizationSettings(
        image_size=128,
        blur_radius=0.0,
        faces_per_pixel=1,
    )

    # Create a phong renderer by composing a rasterizer and a shader. The textured
    # phong shader will interpolate the texture uv coordinates for each vertex,
    # sample from a texture image and apply the Phong lighting model
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=camera,
            raster_settings=raster_settings
        ),
        shader=SoftPhongShader(
            device=device,
            cameras=camera,
            lights=lights
        )
    )

    # Create a batch of meshes by repeating the cow mesh and associated textures.
    # Meshes has a useful `extend` method which allows us to do this very easily.
    # This also extends the textures.
    meshes = mesh.extend(num_views)

    # Render the cow mesh from each viewing angle
    target_images = renderer(meshes, cameras=cameras, lights=lights)

    # Our multi-view cow dataset will be represented by these 2 lists of tensors,
    # each of length num_views.
    target_rgb = [target_images[i, ..., :3] for i in range(num_views)]
    target_cameras = [OpenGLPerspectiveCameras(device=device, R=R[None, i, ...],
                                               T=T[None, i, ...]) for i in range(num_views)]

Visualize the dataset:

    # RGB images
    image_grid(target_images.cpu().numpy(), rows=4, cols=5, rgb=True)
    plt.show()

Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to images of just the cow silhouette. For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We construct a soft silhouette shader to render this alpha channel.

    # Rasterization settings for silhouette rendering
    sigma = 1e-4
    raster_settings_silhouette = RasterizationSettings(
        image_size=128,
        blur_radius=np.log(1. / 1e-4 - 1.)*sigma,
        faces_per_pixel=50,
    )

    # Silhouette renderer
    renderer_silhouette = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=camera,
            raster_settings=raster_settings_silhouette
        ),
        shader=SoftSilhouetteShader()
    )

    # Render silhouette images. The 4th channel of the rendering output is
    # the alpha/silhouette channel
    silhouette_images = renderer_silhouette(meshes, cameras=cameras, lights=lights)
    target_silhouette = [silhouette_images[i, ..., 3] for i in range(num_views)]

    # Visualize silhouette images
    image_grid(silhouette_images.cpu().numpy(), rows=4, cols=5, rgb=False)
    plt.show()

## 3. Mesh prediction via silhouette rendering

In the previous section, we created a dataset of images of multiple viewpoints of a cow. In this section, we predict a mesh by observing those target images without any knowledge of the ground truth cow mesh. We assume we know the position of the cameras and lighting.

We first define some helper functions to visualize the results of our mesh prediction:

    # Show a visualization comparing the rendered predicted mesh to the ground truth
    # mesh
    def visualize_prediction(predicted_mesh, renderer=renderer_silhouette,
                             target_image=target_rgb[1], title='',
                             silhouette=False):
        inds = 3 if silhouette else range(3)
        predicted_images = renderer(predicted_mesh)
        plt.figure(figsize=(20, 10))
        plt.subplot(1, 2, 1)
        plt.imshow(predicted_images[0, ..., inds].cpu().detach().numpy())

        plt.subplot(1, 2, 2)
        plt.imshow(target_image.cpu().detach().numpy())
        plt.title(title)
        plt.grid("off")
        plt.axis("off")

    # Plot losses as a function of optimization iteration
    def plot_losses(losses):
        fig = plt.figure(figsize=(13, 5))
        ax = fig.gca()
        for k, l in losses.items():
            ax.plot(l['values'], label=k + " loss")
        ax.legend(fontsize="16")
        ax.set_xlabel("Iteration", fontsize="16")
        ax.set_ylabel("Loss", fontsize="16")
        ax.set_title("Loss vs iterations", fontsize="16")

Starting from a sphere mesh, we will learn offsets of each vertex such that the predicted mesh silhouette is more similar to the target silhouette image at each optimization step. We begin by loading our initial sphere mesh:

    # We initialize the source shape to be a sphere of radius 1.
    src_mesh = ico_sphere(4, device)

We create a new differentiable renderer for rendering the silhouette of our predicted mesh:

    # Rasterization settings for differentiable rendering, where the blur_radius
    # initialization is based on Liu et al, 'Soft Rasterizer: A Differentiable
    # Renderer for Image-based 3D Reasoning', ICCV 2019
    sigma = 1e-4
    raster_settings_soft = RasterizationSettings(
        image_size=128,
        blur_radius=np.log(1. / 1e-4 - 1.)*sigma,
        faces_per_pixel=50,
    )

    # Silhouette renderer
    renderer_silhouette = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=camera,
            raster_settings=raster_settings_soft
        ),
        shader=SoftSilhouetteShader()
    )

We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target silhouettes:

    # Number of views to optimize over in each SGD iteration
    num_views_per_iteration = 2
    # Number of optimization steps
    Niter = 2000
    # Plot period for the losses
    plot_period = 250

    %matplotlib inline

    # Optimize using rendered silhouette image loss, mesh edge loss, mesh normal
    # consistency, and mesh laplacian smoothing
    losses = {"silhouette": {"weight": 1.0, "values": []},
              "edge": {"weight": 1.0, "values": []},
              "normal": {"weight": 0.01, "values": []},
              "laplacian": {"weight": 1.0, "values": []},
             }

    # Losses to smooth / regularize the mesh shape
    def update_mesh_shape_prior_losses(mesh, loss):
        # the edge length of the predicted mesh
        loss["edge"] = mesh_edge_loss(mesh)

        # mesh normal consistency
        loss["normal"] = mesh_normal_consistency(mesh)

        # mesh laplacian smoothing
        loss["laplacian"] = mesh_laplacian_smoothing(mesh, method="uniform")

    # We will learn to deform the source mesh by offsetting its vertices
    # The shape of the deform parameters is equal to the total number of vertices in
    # src_mesh
    verts_shape = src_mesh.verts_packed().shape
    deform_verts = torch.full(verts_shape, 0.0, device=device, requires_grad=True)

    # The optimizer
    optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)

We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the silhouettes of the target images:

    loop = tqdm(range(Niter))

    for i in loop:
        # Initialize optimizer
        optimizer.zero_grad()

        # Deform the mesh
        new_src_mesh = src_mesh.offset_verts(deform_verts)

        # Losses to smooth / regularize the mesh shape
        loss = {k: torch.tensor(0.0, device=device) for k in losses}
        update_mesh_shape_prior_losses(new_src_mesh, loss)

        # Compute the average silhouette loss over two random views, as the average
        # squared L2 distance between the predicted silhouette and the target
        # silhouette from our dataset
        for j in np.random.permutation(num_views).tolist()[:num_views_per_iteration]:
            images_predicted = renderer_silhouette(new_src_mesh, cameras=target_cameras[j], lights=lights)
            predicted_silhouette = images_predicted[..., 3]
            loss_silhouette = ((predicted_silhouette - target_silhouette[j]) ** 2).mean()
            loss["silhouette"] += loss_silhouette / num_views_per_iteration

        # Weighted sum of the losses
        sum_loss = torch.tensor(0.0, device=device)
        for k, l in loss.items():
            sum_loss += l * losses[k]["weight"]
            losses[k]["values"].append(l)

        # Print the losses
        loop.set_description("total_loss = %.6f" % sum_loss)

        # Plot mesh
        if i % plot_period == 0:
            visualize_prediction(new_src_mesh, title="iter: %d" % i, silhouette=True,
                                 target_image=target_silhouette[1])

        # Optimization step
        sum_loss.backward()
        optimizer.step()

    visualize_prediction(new_src_mesh, silhouette=True,
                         target_image=target_silhouette[1])
    plot_losses(losses)

## 4. Mesh and texture prediction via textured rendering

We can predict both the mesh and its texture if we add an additional loss based on comparing a predicted rendered RGB image to the target image. As before, we start with a sphere mesh. We learn both translational offsets and RGB texture colors for each vertex in the sphere mesh. Since our loss is based on rendered RGB pixel values instead of just the silhouette, we use a **SoftPhongShader** instead of a **SoftSilhouetteShader**.

    # Rasterization settings for differentiable rendering, where the blur_radius
    # initialization is based on Liu et al, 'Soft Rasterizer: A Differentiable
    # Renderer for Image-based 3D Reasoning', ICCV 2019
    sigma = 1e-4
    raster_settings_soft = RasterizationSettings(
        image_size=128,
        blur_radius=np.log(1. / 1e-4 - 1.)*sigma,
        faces_per_pixel=50,
    )

    # Differentiable soft renderer using per vertex RGB colors for texture
    renderer_textured = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=camera,
            raster_settings=raster_settings_soft
        ),
        shader=SoftPhongShader(device=device,
                               cameras=camera,
                               lights=lights)
    )

We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target RGB images:

    # Number of views to optimize over in each SGD iteration
    num_views_per_iteration = 2
    # Number of optimization steps
    Niter = 2000
    # Plot period for the losses
    plot_period = 250

    %matplotlib inline

    # Optimize using rendered RGB image loss, rendered silhouette image loss, mesh
    # edge loss, mesh normal consistency, and mesh laplacian smoothing
    losses = {"rgb": {"weight": 1.0, "values": []},
              "silhouette": {"weight": 1.0, "values": []},
              "edge": {"weight": 1.0, "values": []},
              "normal": {"weight": 0.01, "values": []},
              "laplacian": {"weight": 1.0, "values": []},
             }

    # We will learn to deform the source mesh by offsetting its vertices
    # The shape of the deform parameters is equal to the total number of vertices in
    # src_mesh
    verts_shape = src_mesh.verts_packed().shape
    deform_verts = torch.full(verts_shape, 0.0, device=device, requires_grad=True)

    # We will also learn per vertex colors for our sphere mesh that define texture
    # of the mesh
    sphere_verts_rgb = torch.full([1, verts_shape[0], 3], 0.5, device=device, requires_grad=True)

    # The optimizer
    optimizer = torch.optim.SGD([deform_verts, sphere_verts_rgb], lr=1.0, momentum=0.9)

We write an optimization loop to iteratively refine our predicted mesh and its vertex colors from the sphere mesh into a mesh that matches the target images:

    loop = tqdm(range(Niter))

    for i in loop:
        # Initialize optimizer
        optimizer.zero_grad()

        # Deform the mesh
        new_src_mesh = src_mesh.offset_verts(deform_verts)

        # Add per vertex colors to texture the mesh
        new_src_mesh.textures = TexturesVertex(verts_rgb=sphere_verts_rgb)

        # Losses to smooth / regularize the mesh shape
        loss = {k: torch.tensor(0.0, device=device) for k in losses}
        update_mesh_shape_prior_losses(new_src_mesh, loss)

        # Randomly select two views to optimize over in this iteration. Compared
        # to using just one view, this helps resolve ambiguities between updating
        # mesh shape vs. updating mesh texture
        for j in np.random.permutation(num_views).tolist()[:num_views_per_iteration]:
            images_predicted = renderer_textured(new_src_mesh, cameras=target_cameras[j], lights=lights)

            # Squared L2 distance between the predicted silhouette and the target
            # silhouette from our dataset
            predicted_silhouette = images_predicted[..., 3]
            loss_silhouette = ((predicted_silhouette - target_silhouette[j]) ** 2).mean()
            loss["silhouette"] += loss_silhouette / num_views_per_iteration

            # Squared L2 distance between the predicted RGB image and the target
            # image from our dataset
            predicted_rgb = images_predicted[..., :3]
            loss_rgb = ((predicted_rgb - target_rgb[j]) ** 2).mean()
            loss["rgb"] += loss_rgb / num_views_per_iteration

        # Weighted sum of the losses
        sum_loss = torch.tensor(0.0, device=device)
        for k, l in loss.items():
            sum_loss += l * losses[k]["weight"]
            losses[k]["values"].append(l)

        # Print the losses
        loop.set_description("total_loss = %.6f" % sum_loss)

        # Plot mesh
        if i % plot_period == 0:
            visualize_prediction(new_src_mesh, renderer=renderer_textured, title="iter: %d" % i, silhouette=False)

        # Optimization step
        sum_loss.backward()
        optimizer.step()

    visualize_prediction(new_src_mesh, renderer=renderer_textured, silhouette=False)
    plot_losses(losses)

## 5. Save the final predicted mesh

    # Fetch the verts and faces of the final predicted mesh
    final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0)

    # Scale normalize back to the original target size
    final_verts = final_verts * scale + center

    # Store the predicted mesh using save_obj
    final_obj = os.path.join('./', 'final_model.obj')
    save_obj(final_obj, final_verts, final_faces)

## 6. Conclusion

In this tutorial, we learned how to load a textured mesh from an obj file and create a synthetic dataset by rendering the mesh from multiple viewpoints. We showed how to set up an optimization loop to fit a mesh to the observed dataset images based on a rendered silhouette loss. We then augmented this optimization loop with an additional loss based on rendered RGB images, which allowed us to predict both a mesh and its texture.
Hunks from a second tutorial notebook (file header collapsed in this view):

@@ -87,7 +87,7 @@
 "from pytorch3d.io import load_objs_as_meshes, load_obj\n",
 "\n",
 "# Data structures and functions for rendering\n",
-"from pytorch3d.structures import Meshes, Textures\n",
+"from pytorch3d.structures import Meshes\n",
 "from pytorch3d.renderer import (\n",
 " look_at_view_transform,\n",
 " FoVPerspectiveCameras, \n",
@@ -97,7 +97,8 @@
 " RasterizationSettings, \n",
 " MeshRenderer, \n",
 " MeshRasterizer, \n",
-" SoftPhongShader\n",
+" SoftPhongShader,\n",
+" TexturesUV\n",
 ")\n",
 "\n",
 "# add path for demo utils functions \n",
@@ -170,7 +171,7 @@
 "\n",
 "**Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes. \n",
 "\n",
-"**Textures** is an auxillary datastructure for storing texture information about meshes. \n",
+"**TexturesUV** is an auxillary datastructure for storing vertex uv and texture maps for meshes. \n",
 "\n",
 "**Meshes** has several class methods which are used throughout the rendering pipeline."
 ]
@@ -537,7 +538,7 @@
 "source": [
 "# We can pass arbirary keyword arguments to the rasterizer/shader via the renderer\n",
 "# so the renderer does not need to be reinitialized if any of the settings change.\n",
-"images = renderer(meshes, cameras=cameras, lights=lights)"
+"images = renderer(mesh, cameras=cameras, lights=lights)"
 ]
 },
 {
@@ -582,9 +583,9 @@
 "backup_notebook_id": "569222367081034"
 },
 "kernelspec": {
-"display_name": "pytorch3d (local)",
+"display_name": "intro_to_cv",
 "language": "python",
-"name": "pytorch3d_local"
+"name": "bento_kernel_intro_to_cv"
 },
 "language_info": {
 "codemirror_mode": {
Hunks from the TexturesUV class:

@@ -599,11 +599,6 @@ class TexturesUV(TexturesBase):
             if not all(v.device == self.device for v in verts_uvs):
                 raise ValueError("verts_uvs and faces_uvs must be on the same device")
 
-            # These values may be overridden when textures is
-            # passed into the Meshes constructor. For more details
-            # refer to the __init__ of Meshes.
-            self._num_verts_per_mesh = [len(v) for v in verts_uvs]
-
         elif torch.is_tensor(verts_uvs):
             if (
                 verts_uvs.ndim != 3
@@ -621,7 +616,6 @@ class TexturesUV(TexturesBase):
             # These values may be overridden when textures is
             # passed into the Meshes constructor.
             max_V = verts_uvs.shape[1]
-            self._num_verts_per_mesh = [max_V] * self._N
         else:
             raise ValueError("Expected verts_uvs to be a tensor or list")
 
@@ -758,9 +752,11 @@ class TexturesUV(TexturesBase):
                 torch.empty((0, 2), dtype=torch.float32, device=self.device)
             ] * self._N
         else:
-            self._verts_uvs_list = padded_to_list(
-                self._verts_uvs_padded, split_size=self._num_verts_per_mesh
-            )
+            # The number of vertices in the mesh and in verts_uvs can differ
+            # e.g. if a vertex is shared between 3 faces, it can
+            # have up to 3 different uv coordinates. Therefore we cannot
+            # convert directly from padded to list using _num_verts_per_mesh
+            self._verts_uvs_list = list(self._verts_uvs_padded.unbind(0))
         return self._verts_uvs_list
 
     # Currently only the padded maps are used.
@@ -783,7 +779,6 @@ class TexturesUV(TexturesBase):
                 "verts_uvs_padded",
                 "faces_uvs_padded",
                 "_num_faces_per_mesh",
-                "_num_verts_per_mesh",
             ],
         )
         new_tex = TexturesUV(
@@ -791,8 +786,8 @@ class TexturesUV(TexturesBase):
             faces_uvs=new_props["faces_uvs_padded"],
             verts_uvs=new_props["verts_uvs_padded"],
         )
+
         new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
-        new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"]
         return new_tex
 
     def sample_textures(self, fragments, **kwargs) -> torch.Tensor:
@@ -860,6 +855,7 @@ class TexturesUV(TexturesBase):
         # right-bottom pixel of input.
         pixel_uvs = pixel_uvs * 2.0 - 1.0
         texture_maps = torch.flip(texture_maps, [2])  # flip y axis of the texture map
+
         if texture_maps.device != pixel_uvs.device:
             texture_maps = texture_maps.to(pixel_uvs.device)
 
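The conversion swapped in the verts_uvs_list hunk above can be emulated in plain PyTorch. This is only an illustration with made-up shapes; the list comprehension stands in for what `padded_to_list(..., split_size=_num_verts_per_mesh)` used to produce:

```python
import torch

# Padded uv coordinates for a batch of 2 meshes, up to 6 uv rows each.
verts_uvs_padded = torch.rand(2, 6, 2)
# Vertex counts of the *meshes* (not of their uv tables).
num_verts_per_mesh = [4, 4]

# Old behaviour: each entry is cut down to the mesh's vertex count, losing uv rows.
truncated = [uv[:n] for uv, n in zip(verts_uvs_padded.unbind(0), num_verts_per_mesh)]
print([t.shape for t in truncated])  # [torch.Size([4, 2]), torch.Size([4, 2])]

# New behaviour: list(padded.unbind(0)) keeps the full padded rows per mesh.
kept = list(verts_uvs_padded.unbind(0))
print([t.shape for t in kept])       # [torch.Size([6, 2]), torch.Size([6, 2])]
```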
Hunk from the TexturesUV tests (class TestTexturesUV):

@@ -588,10 +588,19 @@ class TestTexturesUV(TestCaseMixin, unittest.TestCase):
         tex_init = tex_mesh.textures
         new_tex = new_mesh.textures
+        new_tex_num_verts = new_mesh.num_verts_per_mesh()
 
         for i in range(len(tex_mesh)):
             for n in range(N):
+                tex_nv = new_tex_num_verts[i * N + n]
                 self.assertClose(
-                    tex_init.verts_uvs_list()[i], new_tex.verts_uvs_list()[i * N + n]
+                    # The original textures were initialized using
+                    # verts uvs list
+                    tex_init.verts_uvs_list()[i],
+                    # In the new textures, the verts_uvs are initialized
+                    # from padded. The verts per mesh are not used to
+                    # convert from padded to list. See TexturesUV for an
+                    # explanation.
+                    new_tex.verts_uvs_list()[i * N + n][:tex_nv, ...],
                 )
                 self.assertClose(
                     tex_init.faces_uvs_list()[i], new_tex.faces_uvs_list()[i * N + n]