diff --git a/lib/sdf.py b/lib/sdf.py index c44cca3..a7a8487 100644 --- a/lib/sdf.py +++ b/lib/sdf.py @@ -90,9 +90,9 @@ def eval_grid_octree(coords, eval_func, sdf = np.zeros(resolution) - notprocessed = np.zeros(resolution, dtype=np.bool) + notprocessed = np.zeros(resolution, dtype=bool) notprocessed[:-1,:-1,:-1] = True - grid_mask = np.zeros(resolution, dtype=np.bool) + grid_mask = np.zeros(resolution, dtype=bool) reso = resolution[0] // init_resolution diff --git a/working_copy_of_PIFuHD_Demo_aug24.ipynb b/working_copy_of_PIFuHD_Demo_aug24.ipynb new file mode 100644 index 0000000..46d235d --- /dev/null +++ b/working_copy_of_PIFuHD_Demo_aug24.ipynb @@ -0,0 +1,916 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [], + "gpuType": "T4", + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eclLG4xlJRIE" + }, + "source": [ + "# PIFuHD Demo: https://shunsukesaito.github.io/PIFuHD/\n", + "\n", + "![](https://shunsukesaito.github.io/PIFuHD/resources/images/pifuhd.gif)\n", + "\n", + "Made by [![Follow](https://img.shields.io/twitter/follow/psyth91?style=social)](https://twitter.com/psyth91)\n", + "\n", + "To see how the model works, visit the project repository.\n", + "\n", + "[![GitHub stars](https://img.shields.io/github/stars/facebookresearch/pifuhd?style=social)](https://github.com/facebookresearch/pifuhd)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wmFdsTvLKtBO" + }, + "source": [ + "## Note\n", + "Make sure that your runtime type is 'Python 3 with GPU acceleration'. To do so, go to Edit > Notebook settings > Hardware Accelerator > Select \"GPU\"." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1TfPAtL4CyZw" + }, + "source": [ + "## More Info\n", + "- Paper: https://arxiv.org/pdf/2004.00452.pdf\n", + "- Repo: https://github.com/facebookresearch/pifuhd\n", + "- Project Page: https://shunsukesaito.github.io/PIFuHD/\n", + "- 1-minute/5-minute Presentation (see below)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "5DDpqpf2BABR", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 931 + }, + "outputId": "55b6f19a-7860-4750-9695-27032ce638e2" + }, + "source": [ + "import IPython\n", + "IPython.display.HTML('

1-Minute Presentation [embedded YouTube video]
5-Minute Presentation [embedded YouTube video]

')" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "

1-Minute Presentation [embedded YouTube video]
5-Minute Presentation [embedded YouTube video]

" + ] + }, + "metadata": {}, + "execution_count": 1 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8vZaAyhUJ9QC" + }, + "source": [ + "## Requirements\n", + "- Python 3\n", + "- PyTorch tested on 1.4.0\n", + "- json\n", + "- PIL\n", + "- skimage\n", + "- tqdm\n", + "- numpy\n", + "- cv2" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sfPDep8LlP_I" + }, + "source": [ + "## Help! I'm new to Google Colab\n", + "\n", + "You can check out the following youtube video on how to upload your own picture and run PIFuHD. **Note that with new update, you can upload your own picture more easily with GUI down below.**\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "zaMP1EitljaA", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 461 + }, + "outputId": "6f2b9df4-fdda-49a3-9db8-72a63e232c77" + }, + "source": [ + "import IPython\n", + "IPython.display.HTML('')" + ], + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "/usr/local/lib/python3.10/dist-packages/IPython/core/display.py:724: UserWarning: Consider using IPython.display.IFrame instead\n", + " warnings.warn(\"Consider using IPython.display.IFrame instead\")\n" + ] + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "" + ] + }, + "metadata": {}, + "execution_count": 2 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WYhlsDkg1Hwb" + }, + "source": [ + "## Clone PIFuHD repository" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BmpEwdOd1G1z", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "2d1252d1-bf47-4fb7-cc75-bdb466650204" + }, + "source": [ + "!git clone https://github.com/facebookresearch/pifuhd" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Cloning into 'pifuhd'...\n", + "remote: Enumerating objects: 222, done.\u001b[K\n", + "remote: Counting objects: 100% (126/126), done.\u001b[K\n", + "remote: Compressing objects: 100% (44/44), done.\u001b[K\n", + "remote: Total 222 (delta 92), reused 82 (delta 82), pack-reused 96 (from 1)\u001b[K\n", + "Receiving objects: 100% (222/222), 399.35 KiB | 15.36 MiB/s, done.\n", + "Resolving deltas: 100% (114/114), done.\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QvQm-A8ESKb2" + }, + "source": [ + "## Configure input data" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "xvle9T10fB6g", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "a3ac072e-9669-4a61-b185-3fa9fe9168dc" + }, + "source": [ + "cd /content/pifuhd/sample_images" + ], + "execution_count": 4, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content/pifuhd/sample_images\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9SI7Ye1JfIim" + }, + "source": [ + "**If you want to upload your own picture, run the next cell**. Otherwise, go to the next next cell. Currently PNG, JPEG files are supported." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "jaV_7Yi8fM-B" + }, + "source": [ + "# from google.colab import files\n", + "\n", + "# filename = list(files.upload().keys())[0]" + ], + "execution_count": 5, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "AEzmmB01SOZp" + }, + "source": [ + "import os\n", + "\n", + "try:\n", + " image_path = '/content/pifuhd/sample_images/%s' % filename\n", + "except:\n", + " image_path = '/content/pifuhd/sample_images/test.png' # example image\n", + "image_dir = os.path.dirname(image_path)\n", + "file_name = os.path.splitext(os.path.basename(image_path))[0]\n", + "\n", + "# output pathes\n", + "obj_path = '/content/pifuhd/results/pifuhd_final/recon/result_%s_256.obj' % file_name\n", + "out_img_path = '/content/pifuhd/results/pifuhd_final/recon/result_%s_256.png' % file_name\n", + "video_path = '/content/pifuhd/results/pifuhd_final/recon/result_%s_256.mp4' % file_name\n", + "video_display_path = '/content/pifuhd/results/pifuhd_final/result_%s_256_display.mp4' % file_name" + ], + "execution_count": 6, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "896EC7iQfXkj", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "723eaa7f-a793-479f-f261-c1b07ce79f26" + }, + "source": [ + "cd /content" + ], + "execution_count": 7, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JbVmda9J5TDL" + }, + "source": [ + "## Preprocess (for cropping image)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "UtMjWGNU5STe", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "9eae1a92-7a4d-4da8-cc99-6722406da747" + }, + "source": [ + "!git clone https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch.git" + ], + "execution_count": 8, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Cloning into 'lightweight-human-pose-estimation.pytorch'...\n", + "remote: Enumerating objects: 124, done.\u001b[K\n", + "remote: Counting objects: 100% (34/34), done.\u001b[K\n", + "remote: Compressing objects: 100% (16/16), done.\u001b[K\n", + "remote: Total 124 (delta 21), reused 19 (delta 18), pack-reused 90 (from 1)\u001b[K\n", + "Receiving objects: 100% (124/124), 230.77 KiB | 10.99 MiB/s, done.\n", + "Resolving deltas: 100% (53/53), done.\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "F-vYklhI5dab", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "36994038-c0fa-4f3f-ce38-8cf0e2dec82b" + }, + "source": [ + "cd /content/lightweight-human-pose-estimation.pytorch/" + ], + "execution_count": 9, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content/lightweight-human-pose-estimation.pytorch\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "dRod9SOu77I6", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "d110f5ee-2737-4aa4-f80c-82b914466bb0" + }, + "source": [ + "!wget https://download.01.org/opencv/openvino_training_extensions/models/human_pose_estimation/checkpoint_iter_370000.pth" + ], + "execution_count": 10, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2024-08-19 14:10:40-- https://download.01.org/opencv/openvino_training_extensions/models/human_pose_estimation/checkpoint_iter_370000.pth\n", + "Resolving download.01.org (download.01.org)... 
104.89.123.208, 2600:1417:3f:793::a87, 2600:1417:3f:78d::a87\n", + "Connecting to download.01.org (download.01.org)|104.89.123.208|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 87959810 (84M) [application/octet-stream]\n", + "Saving to: ‘checkpoint_iter_370000.pth’\n", + "\n", + "checkpoint_iter_370 100%[===================>] 83.88M 180MB/s in 0.5s \n", + "\n", + "2024-08-19 14:10:41 (180 MB/s) - ‘checkpoint_iter_370000.pth’ saved [87959810/87959810]\n", + "\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "!pip install pycocotools" + ], + "metadata": { + "id": "5CcsRMFAARg0", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "9a97e8bc-3d8b-493c-f3a4-1a5e5513e20b" + }, + "execution_count": 11, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Requirement already satisfied: pycocotools in /usr/local/lib/python3.10/dist-packages (2.0.8)\n", + "Requirement already satisfied: matplotlib>=2.1.0 in /usr/local/lib/python3.10/dist-packages (from pycocotools) (3.7.1)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from pycocotools) (1.26.4)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools) (1.2.1)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools) (0.12.1)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools) (4.53.1)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools) (1.4.5)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools) (24.1)\n", + "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools) (9.4.0)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools) (3.1.2)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools) (2.8.2)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib>=2.1.0->pycocotools) (1.16.0)\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "# !pip uninstall numpy\n", + "# !pip install numpy==1.20.0" + ], + "metadata": { + "id": "jTHmiIYQumJq" + }, + "execution_count": 12, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "PdRcDXe38lHB" + }, + "source": [ + "import torch\n", + "import cv2\n", + "import numpy as np\n", + "from models.with_mobilenet import PoseEstimationWithMobileNet\n", + "from modules.keypoints import extract_keypoints, group_keypoints\n", + "from modules.load_state import load_state\n", + "from modules.pose import Pose, track_poses\n", + "import demo\n", + "\n", + "def get_rect(net, images, height_size):\n", + " net = net.eval()\n", + "\n", + " stride = 8\n", + " upsample_ratio = 4\n", + " num_keypoints = Pose.num_kpts\n", + " previous_poses = []\n", + " delay = 33\n", + " for image in images:\n", + " rect_path = image.replace('.%s' % (image.split('.')[-1]), '_rect.txt')\n", + " img = cv2.imread(image, cv2.IMREAD_COLOR)\n", + " orig_img = img.copy()\n", + " orig_img 
= img.copy()\n", + " heatmaps, pafs, scale, pad = demo.infer_fast(net, img, height_size, stride, upsample_ratio, cpu=False)\n", + "\n", + " total_keypoints_num = 0\n", + " all_keypoints_by_type = []\n", + " for kpt_idx in range(num_keypoints): # 19th for bg\n", + " total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)\n", + "\n", + " pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs)\n", + " for kpt_id in range(all_keypoints.shape[0]):\n", + " all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale\n", + " all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale\n", + " current_poses = []\n", + "\n", + " rects = []\n", + " for n in range(len(pose_entries)):\n", + " if len(pose_entries[n]) == 0:\n", + " continue\n", + " pose_keypoints = np.ones((num_keypoints, 2), dtype=int) * -1\n", + " valid_keypoints = []\n", + " for kpt_id in range(num_keypoints):\n", + " if pose_entries[n][kpt_id] != -1.0: # keypoint was found\n", + " pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])\n", + " pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])\n", + " valid_keypoints.append([pose_keypoints[kpt_id, 0], pose_keypoints[kpt_id, 1]])\n", + " valid_keypoints = np.array(valid_keypoints)\n", + "\n", + " if pose_entries[n][10] != -1.0 or pose_entries[n][13] != -1.0:\n", + " pmin = valid_keypoints.min(0)\n", + " pmax = valid_keypoints.max(0)\n", + "\n", + " center = (0.5 * (pmax[:2] + pmin[:2])).astype(int)\n", + " radius = int(0.65 * max(pmax[0]-pmin[0], pmax[1]-pmin[1]))\n", + " elif pose_entries[n][10] == -1.0 and pose_entries[n][13] == -1.0 and pose_entries[n][8] != -1.0 and pose_entries[n][11] != -1.0:\n", + " # if leg is missing, use pelvis to get cropping\n", + " center = (0.5 * (pose_keypoints[8] + pose_keypoints[11])).astype(int)\n", + " radius = int(1.45*np.sqrt(((center[None,:] - valid_keypoints)**2).sum(1)).max(0))\n", + " center[1] += int(0.05*radius)\n", + " else:\n", + " center = np.array([img.shape[1]//2,img.shape[0]//2])\n", + " radius = max(img.shape[1]//2,img.shape[0]//2)\n", + "\n", + " x1 = center[0] - radius\n", + " y1 = center[1] - radius\n", + "\n", + " rects.append([x1, y1, 2*radius, 2*radius])\n", + "\n", + " np.savetxt(rect_path, np.array(rects), fmt='%d')" + ], + "execution_count": 13, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "M6cGZD6f6IaY" + }, + "source": [ + "net = PoseEstimationWithMobileNet()\n", + "checkpoint = torch.load('checkpoint_iter_370000.pth', map_location='cpu')\n", + "load_state(net, checkpoint)\n", + "\n", + "get_rect(net.cuda(), [image_path], 512)" + ], + "execution_count": 14, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Y0rgMInwTt0s" + }, + "source": [ + "## Download the Pretrained Model" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "UrIcZweSNRFI", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "e77ebb3a-80e8-48bf-ece4-ba24c1f53c4c" + }, + "source": [ + "cd /content/pifuhd/" + ], + "execution_count": 15, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content/pifuhd\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "k3jjm6HuQRk8", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "8a5a71c0-8c40-4768-b032-862978b0ccdc" + }, + "source": [ + "!sh 
./scripts/download_trained_model.sh" + ], + "execution_count": 16, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "+ mkdir -p checkpoints\n", + "+ cd checkpoints\n", + "+ wget https://dl.fbaipublicfiles.com/pifuhd/checkpoints/pifuhd.pt pifuhd.pt\n", + "--2024-08-19 14:10:54-- https://dl.fbaipublicfiles.com/pifuhd/checkpoints/pifuhd.pt\n", + "Resolving dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)... 13.35.18.103, 13.35.18.15, 13.35.18.87, ...\n", + "Connecting to dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)|13.35.18.103|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 1548375177 (1.4G) [application/octet-stream]\n", + "Saving to: ‘pifuhd.pt’\n", + "\n", + "pifuhd.pt 100%[===================>] 1.44G 159MB/s in 14s \n", + "\n", + "2024-08-19 14:11:07 (108 MB/s) - ‘pifuhd.pt’ saved [1548375177/1548375177]\n", + "\n", + "--2024-08-19 14:11:07-- http://pifuhd.pt/\n", + "Resolving pifuhd.pt (pifuhd.pt)... failed: Name or service not known.\n", + "wget: unable to resolve host address ‘pifuhd.pt’\n", + "FINISHED --2024-08-19 14:11:07--\n", + "Total wall clock time: 14s\n", + "Downloaded: 1 files, 1.4G in 14s (108 MB/s)\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6heKcA-0QEBw" + }, + "source": [ + "## Run PIFuHD!\n" + ] + }, + { + "cell_type": "code", + "source": [ + "import numpy" + ], + "metadata": { + "id": "JECF8rO-ySkT" + }, + "execution_count": 17, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "numpy.version.version" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 36 + }, + "id": "FsyHgfSoyjxj", + "outputId": "ce315da9-115a-46fa-9399-f245738bc6b9" + }, + "execution_count": 18, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "'1.26.4'" + ], + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + } + }, + "metadata": {}, + "execution_count": 18 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "5995t2PnQTmG", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "aef871ca-9a8f-453c-9f44-02069301b5cd" + }, + "source": [ + "# Warning: all images with the corresponding rectangle files under -i will be processed.\n", + "!python -m apps.simple_test -r 256 --use_rect -i $image_dir\n", + "\n", + "# seems that 256 is the maximum resolution that can fit into Google Colab.\n", + "# If you want to reconstruct a higher-resolution mesh, please try with your own machine." 
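As the comment above notes, `-r 256` is roughly the largest reconstruction resolution that fits on Colab's GPU. On a machine with more memory, a hedged variant of the same invocation might raise it; 512 here is illustrative, not a value tested in this notebook:

```python
# Sketch only: higher-resolution run for a local GPU, reusing the flags above.
# Memory use grows steeply with -r, so 512 assumes considerably more VRAM.
!python -m apps.simple_test -r 512 --use_rect -i $image_dir
```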
+ ], + "execution_count": 19, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Resuming from ./checkpoints/pifuhd.pt\n", + "Warning: opt is overwritten.\n", + "test data size: 1\n", + "initialize network with normal\n", + "initialize network with normal\n", + "generate mesh (test) ...\n", + " 0% 0/1 [00:00=0.1.6 (from fvcore)\n", + " Downloading yacs-0.1.8-py3-none-any.whl.metadata (639 bytes)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from fvcore) (6.0.2)\n", + "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from fvcore) (4.66.5)\n", + "Requirement already satisfied: termcolor>=1.1 in /usr/local/lib/python3.10/dist-packages (from fvcore) (2.4.0)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from fvcore) (9.4.0)\n", + "Requirement already satisfied: tabulate in /usr/local/lib/python3.10/dist-packages (from fvcore) (0.9.0)\n", + "Requirement already satisfied: typing_extensions in /usr/local/lib/python3.10/dist-packages (from iopath) (4.12.2)\n", + "Collecting portalocker (from iopath)\n", + " Downloading portalocker-2.10.1-py3-none-any.whl.metadata (8.5 kB)\n", + "Downloading yacs-0.1.8-py3-none-any.whl (14 kB)\n", + "Downloading portalocker-2.10.1-py3-none-any.whl (18 kB)\n", + "Building wheels for collected packages: fvcore, iopath\n", + " Building wheel for fvcore (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for fvcore: filename=fvcore-0.1.5.post20221221-py3-none-any.whl size=61395 sha256=261e403ecfe298c1f5ee9d92c857ad05ad1da7ffdd20847907dbd243b52f9edc\n", + " Stored in directory: /root/.cache/pip/wheels/01/c0/af/77c1cf53a1be9e42a52b48e5af2169d40ec2e89f7362489dd0\n", + " Building wheel for iopath (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for iopath: filename=iopath-0.1.10-py3-none-any.whl size=31529 sha256=17ecad6f244d26b7609e55888c220521833c490e67a1bfe9405f20b47f9ff1ff\n", + " Stored in directory: /root/.cache/pip/wheels/9a/a3/b6/ac0fcd1b4ed5cfeb3db92e6a0e476cfd48ed0df92b91080c1d\n", + "Successfully built fvcore iopath\n", + "Installing collected packages: yacs, portalocker, iopath, fvcore\n", + "Successfully installed fvcore-0.1.5.post20221221 iopath-0.1.10 portalocker-2.10.1 yacs-0.1.8\n", + "Looking in links: https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt231/download.html\n", + "Collecting pytorch3d\n", + " Downloading https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt231/pytorch3d-0.7.6-cp310-cp310-linux_x86_64.whl (20.5 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.5/20.5 MB\u001b[0m \u001b[31m146.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: fvcore in /usr/local/lib/python3.10/dist-packages (from pytorch3d) (0.1.5.post20221221)\n", + "Requirement already satisfied: iopath in /usr/local/lib/python3.10/dist-packages (from pytorch3d) (0.1.10)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from fvcore->pytorch3d) (1.26.4)\n", + "Requirement already satisfied: yacs>=0.1.6 in /usr/local/lib/python3.10/dist-packages (from fvcore->pytorch3d) (0.1.8)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from fvcore->pytorch3d) (6.0.2)\n", + "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from fvcore->pytorch3d) (4.66.5)\n", + "Requirement already satisfied: termcolor>=1.1 in /usr/local/lib/python3.10/dist-packages (from fvcore->pytorch3d) (2.4.0)\n", + "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from fvcore->pytorch3d) (9.4.0)\n", + "Requirement already satisfied: tabulate in /usr/local/lib/python3.10/dist-packages (from fvcore->pytorch3d) (0.9.0)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from iopath->pytorch3d) (4.12.2)\n", + "Requirement already satisfied: portalocker in /usr/local/lib/python3.10/dist-packages (from iopath->pytorch3d) (2.10.1)\n", + "Installing collected packages: pytorch3d\n", + "Successfully installed pytorch3d-0.7.6\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from lib.colab_util import generate_video_from_obj, set_renderer, video\n", + "\n", + "renderer = set_renderer()" + ], + "metadata": { + "id": "9bMFZ9bDz5rh" + }, + "execution_count": 21, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "afwL_-ROCmDf", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 356 + }, + "outputId": "06a5eecd-1032-4844-a29b-00990fe638a5" + }, + "source": [ + "\n", + "generate_video_from_obj(obj_path, out_img_path, video_path, renderer)\n", + "\n", + "# we cannot play a mp4 video generated by cv2\n", + "!ffmpeg -i $video_path -vcodec libx264 $video_display_path -y -loglevel quiet\n", + "video(video_display_path)" + ], + "execution_count": 22, + "outputs": [ + { + "output_type": "error", + "ename": "FileNotFoundError", + "evalue": "[Errno 2] No such file or directory: '/content/pifuhd/results/pifuhd_final/recon/result_test_256.obj'", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + 
"\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mgenerate_video_from_obj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout_img_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvideo_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrenderer\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;31m# we cannot play a mp4 video generated by cv2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msystem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'ffmpeg -i $video_path -vcodec libx264 $video_display_path -y -loglevel quiet'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mvideo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvideo_display_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/pifuhd/lib/colab_util.py\u001b[0m in \u001b[0;36mgenerate_video_from_obj\u001b[0;34m(obj_path, image_path, video_path, renderer)\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0;31m# Load obj file\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 109\u001b[0;31m \u001b[0mverts_rgb_colors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_verts_rgb_colors\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 110\u001b[0m \u001b[0mverts_rgb_colors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mverts_rgb_colors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 111\u001b[0m \u001b[0mtextures\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTexturesVertex\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mverts_features\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mverts_rgb_colors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/pifuhd/lib/colab_util.py\u001b[0m in \u001b[0;36mget_verts_rgb_colors\u001b[0;34m(obj_path)\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0mrgb_colors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 91\u001b[0m \u001b[0mlines\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreadlines\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mline\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mlines\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + 
"\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/content/pifuhd/results/pifuhd_final/recon/result_test_256.obj'" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eUEXAvcvkVYV" + }, + "source": [ + "## Tips for Inputs: My results are broken!\n", + "\n", + "(Kudos to those who share results on twitter with [#pifuhd](https://twitter.com/search?q=%23pifuhd&src=recent_search_click&f=live) tag!!!!)\n", + "\n", + "Due to the limited variation in the training data, your results might be broken sometimes. Here I share some useful tips to get resonable results.\n", + "\n", + "* Use high-res image. The model is trained with 1024x1024 images. Use at least 512x512 with fine-details. Low-res images and JPEG artifacts may result in unsatisfactory results.\n", + "* Use an image with a single person. If the image contain multiple people, reconstruction quality is likely degraded.\n", + "* Front facing with standing works best (or with fashion pose)\n", + "* The entire body is covered within the image. (Note: now missing legs is partially supported)\n", + "* Make sure the input image is well lit. Exteremy dark or bright image and strong shadow often create artifacts.\n", + "* I recommend nearly parallel camera angle to the ground. High camera height may result in distorted legs or high heels.\n", + "* If the background is cluttered, use less complex background or try removing it using https://www.remove.bg/ before processing.\n", + "* It's trained with human only. Anime characters may not work well (To my surprise, indeed many people tried it!!).\n", + "* Search on twitter with [#pifuhd](https://twitter.com/search?q=%23pifuhd&src=recent_search_click&f=live) tag to get a better sense of what succeeds and what fails.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "u6U0K5CNAO_u" + }, + "source": [ + "## Share your result!\n", + "Please share your results with[ #pifuhd](https://twitter.com/search?q=%23pifuhd&src=recent_search_click&f=live) tag on Twitter. Sharing your good/bad results helps and encourages the authors to further push towards producition-quality human digitization at home.\n", + "**As the tweet buttom below doesn't add the result video automatically, please download the result video above and manually add it to the tweet.**" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "1CBxbdrM9F-9" + }, + "source": [ + "import IPython\n", + "IPython.display.HTML('Tweet #pifuhd (Don\\'t forget to add your result to the tweet!)')" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2d-1pR8UR7PR" + }, + "source": [ + "## Cool Applications\n", + "Special thanks to those who play with PIFuHD and came up with many creative applications!! If you made any cool applications, please tweet your demo with [#pifuhd](https://twitter.com/search?q=%23pifuhd&src=recent_search_click&f=live). I'm constantly checking results there.\n", + "If you need complete texture on the mesh, please try my previous work [PIFu](https://github.com/shunsukesaito/PIFu) as well! It supports 3D reconstruction + texturing from a single image although the geometry quality may not be as good as PIFuHD." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "68JDAYJFSFMV" + }, + "source": [ + "IPython.display.HTML('

Rigging (Mixamo) + Photoreal Rendering (Blender)
"you all still have the NERVE to tell me I can't dance #b3d #pifuhd" pic.twitter.com/kHCnLh6zxH — lukas arendero (@lukazvd) June 21, 2020
FaceApp + Rigging (Mixamo)
"Looks like me wearing a wig #pifuhd" pic.twitter.com/V8o7VduTiG — Shuhei Tsuchida (@shuhei2306) June 21, 2020
Rigging (Mixamo) + AR (Adobe Aero)
"Photo → PIFuHD → Mixamo → Adobe Aero to make AR with sound, then zipped and shared the AR content over LINE. An era where a single photo is enough to build an animated 3D AR scene... amazing. #PIFuHD #AdobeAero #Mixamo" pic.twitter.com/CbiMi4gZ0K — モジョン (@mojon1) June 17, 2020
3D Printing
"#pifuhd So much fun! I printed a tiny me" pic.twitter.com/4qyWuij0Hs — isb (@vxzxzxzxv) June 17, 2020
')" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "lX5CTTW_KWhQ" + }, + "source": [], + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file
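A note on the `lib/sdf.py` hunk at the top of this diff: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24, and the notebook above runs NumPy 1.26.4, so the octree masks in `eval_grid_octree` must be allocated with the builtin `bool`. A minimal standalone sketch of the fixed allocation, with an illustrative `resolution`:

```python
import numpy as np

resolution = (256, 256, 256)  # illustrative grid shape

# dtype=np.bool raises AttributeError on NumPy >= 1.24; the builtin bool
# (equivalently np.bool_) is the supported spelling.
notprocessed = np.zeros(resolution, dtype=bool)
notprocessed[:-1, :-1, :-1] = True
grid_mask = np.zeros(resolution, dtype=bool)
```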