Skip to content
Open
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
273 changes: 273 additions & 0 deletions build_windows.bat
Original file line number Diff line number Diff line change
@@ -0,0 +1,273 @@
@echo off
cd /d %~dp0
Title Trellis2 BUILD (no wheels) (WINDOWS + Python 3.13 + Torch 2.9 + Cuda 13)

setlocal EnableExtensions EnableDelayedExpansion
call :set_colors

echo %green%=====================================%reset%
echo %green% Building TRELLIS.2 (WINDOWS) %reset%
echo %green%=====================================%reset%
echo.

:: -------------------------------------------------
:: Resolve Python
:: -------------------------------------------------
:: %%~$PATH:P expands to the full path of python.exe found on PATH
:: (stays undefined when python.exe is not on PATH).
for %%P in (python.exe) do set "PYTHON_PATH=%%~$PATH:P"

if not defined PYTHON_PATH (
echo %red%ERROR: Python not found in PATH%reset%
echo Install Python from https://www.python.org
exit /b 1
)

echo Using Python:
:: Quoted so interpreters under paths with spaces (e.g. "Program Files") work.
"%PYTHON_PATH%" --version
echo Location: %PYTHON_PATH%
echo.

:: -------------------------------------------------
:: Version checks
:: -------------------------------------------------
:: Populates PYTHON_VERSION / TORCH_VERSION / CUDA_VERSION as "major.minor".
call :get_versions

if not "%PYTHON_VERSION%"=="3.13" (
echo %red%ERROR: Python 3.13 is required%reset%
exit /b 1
)

if not "%TORCH_VERSION%"=="2.9" (
echo %red%ERROR: Torch 2.9.x required%reset%
echo Detected Torch: %TORCH_VERSION%
exit /b 1
)

if not "%CUDA_VERSION%"=="13.0" (
echo %red%ERROR: CUDA 13.0 required%reset%
echo Detected CUDA: %CUDA_VERSION%
exit /b 1
)
Comment on lines +45 to +49
Copy link

Copilot AI Jan 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The CUDA version check is very strict, requiring exactly CUDA 13.0. However, the description mentions "CUDA 12+" support. Users with CUDA 12.x might be unable to use this build script even though it should theoretically work. Consider making this check more flexible to accept CUDA 12.x and 13.x versions, or update the error message to clarify that only CUDA 13.0 is officially supported by this script.

Copilot uses AI. Check for mistakes.

:: -------------------------------------------------
:: Preconditions
:: -------------------------------------------------
echo Assumptions:
echo - Torch 2.9.1 + cu130 already installed
echo - CUDA Toolkit 13.0 installed (nvcc available)
echo - VC++ Redistributable installed
echo.

where nvcc
if errorlevel 1 (
echo %yellow%WARNING: nvcc not found in PATH%reset%
echo FlashAttention build may fail
)
echo.

:: ===============================
:: MSVC (Visual Studio Build Tools)
:: ===============================
echo.
echo ===============================================
echo Configuring MSVC toolchain
echo ===============================================
echo.

set "MSVC_VCVARS=C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvars64.bat"

echo Using MSVC environment script:
echo %MSVC_VCVARS%
echo.

if not exist "%MSVC_VCVARS%" (
echo ERROR: MSVC vcvars64.bat not found
echo.
echo Expected location:
echo %MSVC_VCVARS%
echo.
echo If your Visual Studio is installed elsewhere, edit this path in the script.
echo Required components:
echo - Desktop development with C++
echo - MSVC v143 build tools
echo - Windows 10/11 SDK
echo.
exit /b 1
)

call "%MSVC_VCVARS%"

:: ===============================
:: BUILD FLAGS
:: ===============================
set DISTUTILS_USE_SDK=1
set MSSdk=1
set USE_NINJA=1
set MAX_JOBS=16
set CMAKE_BUILD_PARALLEL_LEVEL=16

echo.
echo Building with:
echo DISTUTILS_USE_SDK=%DISTUTILS_USE_SDK%
echo MSSdk=%MSSdk%
echo USE_NINJA=%USE_NINJA%
echo MAX_JOBS=%MAX_JOBS%
echo CMAKE_BUILD_PARALLEL_LEVEL=%CMAKE_BUILD_PARALLEL_LEVEL%
echo.

:: -------------------------------------------------
:: CUDA runtime DLL visibility (safe)
:: -------------------------------------------------
if defined CUDA_PATH call set "PATH=%CUDA_PATH%\bin;%CUDA_PATH%\libnvvp;%PATH%"

:: ===============================
:: TOOLS
:: ===============================
:: Quoted so Python paths containing spaces work.
"%PYTHON_PATH%" -m pip install -U setuptools wheel ninja cmake pybind11 packaging

:: ===============================
:: RUNTIME DEPS
:: ===============================
"%PYTHON_PATH%" -m pip install ^
trimesh ^
shapely ^
rtree ^
easydict ^
plyfile ^
zstandard

:: =================================================
:: nvdiffrast
:: =================================================
echo.
echo %green%Building nvdiffrast...%reset%

pushd nvdiffrast
"%PYTHON_PATH%" -m pip uninstall -y nvdiffrast
:: NOTE(review): this installs the repo's moving HEAD. Pin a commit/tag
:: (git+https://...@REF) for reproducible, supply-chain-safe builds.
"%PYTHON_PATH%" -m pip install git+https://github.com/NVlabs/nvdiffrast.git --no-build-isolation --no-cache-dir
Copy link

Copilot AI Jan 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This build script installs nvdiffrast directly from the moving HEAD of the NVlabs/nvdiffrast GitHub repository via pip install git+https://github.com/NVlabs/nvdiffrast.git, which introduces an unpinned third-party dependency executed with your build/runtime privileges. If that repository or its dependency chain is compromised, an attacker could execute arbitrary code during installation or later when the package is imported on any system running this script. To mitigate this supply-chain risk, pin nvdiffrast to a trusted immutable identifier (such as a specific commit, tag, or published wheel version) and avoid executing unverified remote code in automated build scripts.

Copilot uses AI. Check for mistakes.
popd

echo %green%nvdiffrast build finished...%reset%

echo.
echo %green%Verifying nvdiffrast...%reset%
"%PYTHON_PATH%" -c "import nvdiffrast.torch as dr; print('nvdiffrast.torch OK')" || (
echo %red%ERROR: nvdiffrast failed to load%reset%
exit /b 1
)

:: ===============================
:: FlexGEMM
:: ===============================
echo.
echo %green%Building FlexGEMM...%reset%

pushd flexgemm
"%PYTHON_PATH%" -m pip uninstall -y flex_gemm
:: NOTE(review): unpinned git HEAD -- pin a commit/tag for reproducibility.
"%PYTHON_PATH%" -m pip install git+https://github.com/JeffreyXiang/FlexGEMM --no-build-isolation --no-cache-dir
popd

echo %green%FlexGEMM build finished...%reset%

:: ===============================
:: CuMesh
:: ===============================
echo.
echo %green%Building CuMesh...%reset%

pushd cumesh
"%PYTHON_PATH%" -m pip uninstall -y cumesh
:: NOTE(review): unpinned git HEAD -- pin a commit/tag for reproducibility.
"%PYTHON_PATH%" -m pip install git+https://github.com/JeffreyXiang/CuMesh --no-build-isolation --no-cache-dir
Comment on lines +163 to +174
Copy link

Copilot AI Jan 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The PR description mentions that PRs #11 (FlexGEMM) and #21 (CuMesh) need to be merged first, but this script installs directly from the main branches of those repositories without checking if those PRs are merged. If those PRs haven't been merged, the build may fail. Consider adding a note in the script comments about this prerequisite or checking for the required fixes.

Copilot uses AI. Check for mistakes.
popd

echo %green%CuMesh build finished...%reset%

:: ===============================
:: o-voxel
:: ===============================
echo.
echo %green%Building o_voxel...%reset%

pushd trellis2\o-voxel
"%PYTHON_PATH%" -m pip uninstall -y o_voxel
git submodule update --init --recursive
:: NOTE(review): "setup.py install" is deprecated; prefer
:: pip install . --no-build-isolation --no-cache-dir
"%PYTHON_PATH%" setup.py install
popd

echo %green%voxel build finished...%reset%

echo.
echo %green%Verifying voxel...%reset%
"%PYTHON_PATH%" -c "import o_voxel; print('Voxel OK')" || (
echo %red%ERROR: Voxel failed to load%reset%
exit /b 1
)

:: =================================================
:: Verification (core environment)
:: =================================================
echo.
echo %green%Verifying core environment...%reset%

"%PYTHON_PATH%" -c "import torch,numpy; print('Torch:',torch.__version__); print('CUDA:',torch.cuda.is_available()); print('NumPy:',numpy.__version__)" || (
echo %red%ERROR: Core env failed%reset%
exit /b 1
)

:: =================================================
:: Verification (native extensions)
:: =================================================
echo.
echo %green%Verifying native extensions (best-effort)...%reset%

:: Generate a throwaway Python script line by line. "echo.TEXT" emits
:: TEXT verbatim, so the spaces after "echo." provide the indentation
:: Python requires (try-body nested one level below "try:").
set "VERIFY_PY=%TEMP%\verify_native_ext.py"

echo import importlib > "%VERIFY_PY%"
echo mods = [^('cumesh','CuMesh'^),^('flex_gemm','FlexGEMM'^),^('o_voxel','Voxel'^),^('nvdiffrast','nvdiffrast'^)] >> "%VERIFY_PY%"
echo for m,n in mods: >> "%VERIFY_PY%"
echo.    try: >> "%VERIFY_PY%"
echo.        importlib.import_module(m) >> "%VERIFY_PY%"
echo.        print('[OK]',n) >> "%VERIFY_PY%"
echo.    except Exception as e: >> "%VERIFY_PY%"
echo.        print('[WARN]',n,'FAILED',e) >> "%VERIFY_PY%"

"%PYTHON_PATH%" "%VERIFY_PY%"
del "%VERIFY_PY%"

echo.
echo %green%=====================================%reset%
echo %green% TRELLIS.2 INSTALL COMPLETE %reset%
echo %green%=====================================%reset%
pause
exit /b 0

:: -------------------------------------------------
:: Helpers
:: -------------------------------------------------

:: Color variables are intentionally empty placeholders (plain output);
:: define ANSI escape sequences here if colored output is desired.
:set_colors
set red=
set green=
set yellow=
set reset=
goto :eof

:: Detect Python / Torch / CUDA versions as "major.minor" strings.
:get_versions
echo Checking versions...
echo.

for /f "tokens=2" %%i in ('"%PYTHON_PATH%" --version 2^>^&1') do (
for /f "tokens=1,2 delims=." %%a in ("%%i") do set PYTHON_VERSION=%%a.%%b
)

:: Probe output goes to %TEMP% (not the CWD) so the script also works
:: from a read-only directory; usebackq lets the file name be quoted.
"%PYTHON_PATH%" -c "import torch; print(torch.__version__)" > "%TEMP%\temp_torch.txt"
for /f "usebackq tokens=1,2 delims=." %%a in ("%TEMP%\temp_torch.txt") do set TORCH_VERSION=%%a.%%b
del "%TEMP%\temp_torch.txt"

"%PYTHON_PATH%" -c "import torch; print(torch.version.cuda)" > "%TEMP%\temp_cuda.txt"
for /f "usebackq tokens=1,2 delims=." %%a in ("%TEMP%\temp_cuda.txt") do set CUDA_VERSION=%%a.%%b
del "%TEMP%\temp_cuda.txt"

echo Python: %PYTHON_VERSION%
echo Torch : %TORCH_VERSION%
echo CUDA : %CUDA_VERSION%
echo.
goto :eof
17 changes: 13 additions & 4 deletions o-voxel/setup.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from setuptools import setup
from torch.utils.cpp_extension import CUDAExtension, BuildExtension, IS_HIP_EXTENSION
import os
import platform

# Absolute directory containing this setup.py; used to resolve include paths.
ROOT = os.path.dirname(os.path.abspath(__file__))
# Build target override via env var; "auto" presumably selects CUDA vs HIP
# from the installed torch -- TODO confirm against the rest of this file.
BUILD_TARGET = os.environ.get("BUILD_TARGET", "auto")
Expand All @@ -22,6 +23,17 @@
# GPU architectures to build for: ";"-separated list from GPU_ARCHS,
# defaulting to "native" (the GPU present at build time).
archs = os.getenv("GPU_ARCHS", "native").split(";")
cc_flag = [f"--offload-arch={arch}" for arch in archs]

# Per-platform compiler flags. MSVC needs /EHsc (exception semantics) and
# /permissive- + /Zc:__cplusplus for conforming C++17; the same host flags
# are forwarded through nvcc via -Xcompiler.
if platform.system() == "Windows":
    extra_compile_args = {
        "cxx": ["/O2", "/std:c++17", "/EHsc", "/permissive-", "/Zc:__cplusplus"],
        "nvcc": ["-O3", "-std=c++17", "-Xcompiler=/std:c++17", "-Xcompiler=/EHsc", "-Xcompiler=/permissive-", "-Xcompiler=/Zc:__cplusplus"] + cc_flag,
    }
else:
    # -O3 matches the pre-refactor flags for non-Windows builds; the
    # Windows-support refactor had accidentally downgraded this to -O2.
    extra_compile_args = {
        "cxx": ["-O3", "-std=c++17"],
        "nvcc": ["-O3", "-std=c++17"] + cc_flag,
    }

setup(
name="o_voxel",
packages=[
Expand Down Expand Up @@ -55,10 +67,7 @@
include_dirs=[
os.path.join(ROOT, "third_party/eigen"),
],
extra_compile_args={
"cxx": ["-O3", "-std=c++17"],
"nvcc": ["-O3","-std=c++17"] + cc_flag,
}
extra_compile_args=extra_compile_args
)
],
cmdclass={
Expand Down
4 changes: 2 additions & 2 deletions o-voxel/src/convert/flexible_dual_grid.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -304,7 +304,7 @@ void boundry_qef(
// Calculate the QEF for the edge (boundary) defined by v0 and v1
Eigen::Vector3d dir(v1.x() - v0.x(), v1.y() - v0.y(), v1.z() - v0.z());
double segment_length = dir.norm();
if (segment_length < 1e-6d) continue; // Skip degenerate edges (zero-length)
if (segment_length < 1e-6) continue; // Skip degenerate edges (zero-length)
dir.normalize(); // unit direction vector

// Projection matrix orthogonal to the direction: I - d d^T
Expand Down Expand Up @@ -334,7 +334,7 @@ void boundry_qef(

Eigen::Vector3d tMax, tDelta;
for (int axis = 0; axis < 3; ++axis) {
if (dir[axis] == 0.0d) {
if (dir[axis] == 0.0) {
tMax[axis] = std::numeric_limits<double>::infinity();
tDelta[axis] = std::numeric_limits<double>::infinity();
} else {
Expand Down
4 changes: 2 additions & 2 deletions o-voxel/src/io/filter_neighbor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ torch::Tensor encode_sparse_voxel_octree_attr_neighbor_cpu(
}

// Pack the deltas into a uint8 tensor
torch::Tensor delta = torch::zeros({N, C}, torch::dtype(torch::kUInt8));
torch::Tensor delta = torch::zeros({static_cast<int64_t>(N), static_cast<int64_t>(C)}, torch::dtype(torch::kUInt8));
uint8_t* delta_data = delta.data_ptr<uint8_t>();
for (int i = 0; i < N; i++) {
int x = coord_data[i * 3 + 0];
Expand Down Expand Up @@ -163,7 +163,7 @@ torch::Tensor decode_sparse_voxel_octree_attr_neighbor_cpu(
}

// Pack the attribute into a uint8 tensor
torch::Tensor attr = torch::zeros({N, C}, torch::dtype(torch::kUInt8));
torch::Tensor attr = torch::zeros({static_cast<int64_t>(N), static_cast<int64_t>(C)}, torch::dtype(torch::kUInt8));
uint8_t* attr_data = attr.data_ptr<uint8_t>();
for (int i = 0; i < N; i++) {
int x = coord_data[i * 3 + 0];
Expand Down
4 changes: 2 additions & 2 deletions o-voxel/src/io/filter_parent.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ torch::Tensor encode_sparse_voxel_octree_attr_parent_cpu(
uint8_t* octree_data = octree.data_ptr<uint8_t>();
uint8_t* attr_data = attr.data_ptr<uint8_t>();

torch::Tensor delta = torch::zeros({N_leaf, C}, torch::kUInt8);
torch::Tensor delta = torch::zeros({static_cast<int64_t>(N_leaf), static_cast<int64_t>(C)}, torch::kUInt8);
uint32_t svo_ptr = 0;
uint32_t attr_ptr = 0;
uint32_t delta_ptr = C;
Expand Down Expand Up @@ -151,7 +151,7 @@ torch::Tensor decode_sparse_voxel_octree_attr_parent_cpu(
uint8_t* octree_data = octree.data_ptr<uint8_t>();
uint8_t* delta_data = delta.data_ptr<uint8_t>();

torch::Tensor attr = torch::zeros({N_leaf, C}, torch::kUInt8);
torch::Tensor attr = torch::zeros({static_cast<int64_t>(N_leaf), static_cast<int64_t>(C)}, torch::kUInt8);
uint32_t svo_ptr = 0;
uint32_t attr_ptr = 0;
uint32_t delta_ptr = C;
Expand Down
4 changes: 2 additions & 2 deletions o-voxel/src/io/svo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ torch::Tensor encode_sparse_voxel_octree_cpu(
}

// Convert SVO to tensor
torch::Tensor svo_tensor = torch::from_blob(svo.data(), {svo.size()}, torch::kUInt8).clone();
torch::Tensor svo_tensor = torch::from_blob(svo.data(), {static_cast<int64_t>(svo.size())}, torch::kUInt8).clone();
return svo_tensor;
}

Expand Down Expand Up @@ -133,6 +133,6 @@ torch::Tensor decode_sparse_voxel_octree_cpu(
// Decode SVO into list of codes
decode_sparse_voxel_octree_cpu_recursive(octree_data, depth, ptr, stack, codes);
// Convert codes to tensor
torch::Tensor codes_tensor = torch::from_blob(codes.data(), {codes.size()}, torch::kInt32).clone();
torch::Tensor codes_tensor = torch::from_blob(codes.data(), {static_cast<int64_t>(codes.size())}, torch::kInt32).clone();
return codes_tensor;
}
Loading