diff --git a/README.md b/README.md
index 1c0ad06b..77f1dab0 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,7 @@
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/microsoft/TRELLIS.2/blob/main/example.ipynb)
https://github.com/user-attachments/assets/63b43a7e-acc7-4c81-a900-6da450527d8f
diff --git a/example.ipynb b/example.ipynb
new file mode 100644
index 00000000..7d9a685d
--- /dev/null
+++ b/example.ipynb
@@ -0,0 +1,141 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "910a71f8",
+ "metadata": {},
+ "source": [
+ "# TRELLIS.2 Inference on Google Colab\n",
+ "This notebook sets up the environment to run Microsoft's 4-billion-parameter TRELLIS.2 model on a standard Google Colab instance (tested on an L4 GPU).\n",
+ "\n",
+ "**Prerequisites:**\n",
+ "Before running this notebook, you must have a Hugging Face account and accept the usage agreements for the following gated models:\n",
+ "1. Background Removal: [briaai/RMBG-2.0](https://huggingface.co/briaai/RMBG-2.0)\n",
+ "2. Image Conditioning: [Facebook DINOv3](https://huggingface.co/facebook/dinov3-vitl16-pretrain-lvd1689m)\n",
+ "\n",
+ "You will also need to add your Hugging Face Read Token to Colab's **Secrets** tab (the key icon on the left sidebar) and name it `HF_TOKEN`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dc1f9fc5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Clone the repository and install all custom CUDA extensions\n",
+ "!git clone -b main https://github.com/microsoft/TRELLIS.2.git --recursive\n",
+ "\n",
+ "%cd /content/TRELLIS.2/\n",
+ "\n",
+ "# Run the setup script to compile cumesh, o-voxel, and flexgemm\n",
+ "!. ./setup.sh --new-env --basic --flash-attn --nvdiffrast --nvdiffrec --cumesh --o-voxel --flexgemm\n",
+ "\n",
+ "# Downgrade libraries to avoid meta tensor and internal utility conflicts\n",
+ "!pip install \"Pillow<10.0.0\"\n",
+ "!pip install \"transformers<5.0.0\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "13256cfb",
+ "metadata": {},
+ "source": [
+ "### 🛑 STOP AND RESTART RUNTIME\n",
+ "Because we installed specific versions of `Pillow` and `transformers`, the running kernel still holds the previously loaded versions in memory; continuing without a restart can crash Python. \n",
+ "\n",
+ "Go to the top menu: **Runtime > Restart session** (or Restart runtime). \n",
+ "Do **not** run the setup cell above again. Proceed directly to the cell below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ee4dc609",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Re-enter the directory after the restart\n",
+ "%cd /content/TRELLIS.2/\n",
+ "\n",
+ "from huggingface_hub import login\n",
+ "from google.colab import userdata\n",
+ "\n",
+ "# Authenticate with Hugging Face to download the gated models\n",
+ "print(\"Logging into Hugging Face...\")\n",
+ "hf_token = userdata.get('HF_TOKEN')\n",
+ "login(hf_token)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "72aa6280",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.environ['OPENCV_IO_ENABLE_OPENEXR'] = '1'\n",
+ "os.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"expandable_segments:True\" # Can save GPU memory\n",
+ "import cv2\n",
+ "import imageio\n",
+ "from PIL import Image\n",
+ "import torch\n",
+ "from trellis2.pipelines import Trellis2ImageTo3DPipeline\n",
+ "from trellis2.utils import render_utils\n",
+ "from trellis2.renderers import EnvMap\n",
+ "import o_voxel\n",
+ "\n",
+ "# 1. Setup Environment Map\n",
+ "envmap = EnvMap(torch.tensor(\n",
+ " cv2.cvtColor(cv2.imread('assets/hdri/forest.exr', cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB),\n",
+ " dtype=torch.float32, device='cuda'\n",
+ "))\n",
+ "\n",
+ "# 2. Load Pipeline\n",
+ "pipeline = Trellis2ImageTo3DPipeline.from_pretrained(\"microsoft/TRELLIS.2-4B\")\n",
+ "pipeline.cuda()\n",
+ "\n",
+ "# 3. Load Image & Run\n",
+ "image = Image.open(\"assets/example_image/T.png\")\n",
+ "mesh = pipeline.run(image)[0]\n",
+ "mesh.simplify(16777216) # nvdiffrast limit\n",
+ "\n",
+ "# 4. Render Video\n",
+ "video = render_utils.make_pbr_vis_frames(render_utils.render_video(mesh, envmap=envmap))\n",
+ "imageio.mimsave(\"sample.mp4\", video, fps=15)\n",
+ "\n",
+ "# 5. Export to GLB\n",
+ "glb = o_voxel.postprocess.to_glb(\n",
+ " vertices = mesh.vertices,\n",
+ " faces = mesh.faces,\n",
+ " attr_volume = mesh.attrs,\n",
+ " coords = mesh.coords,\n",
+ " attr_layout = mesh.layout,\n",
+ " voxel_size = mesh.voxel_size,\n",
+ " aabb = [[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]],\n",
+ " decimation_target = 1000000,\n",
+ " texture_size = 4096,\n",
+ " remesh = True,\n",
+ " remesh_band = 1,\n",
+ " remesh_project = 0,\n",
+ " verbose = True\n",
+ ")\n",
+ "glb.export(\"sample.glb\", extension_webp=True)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}