This commit is contained in:
Huiwenshi
2025-06-13 23:53:14 +08:00
parent 70ee89e0a2
commit c88bee648e
581 changed files with 30365 additions and 1 deletion

File diff suppressed because it is too large


@@ -0,0 +1,107 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import math
import numpy as np
import torch
def transform_pos(mtx, pos, keepdim=False):
t_mtx = torch.from_numpy(mtx).to(pos.device) if isinstance(mtx, np.ndarray) else mtx
if pos.shape[-1] == 3:
posw = torch.cat([pos, torch.ones([pos.shape[0], 1]).to(pos.device)], axis=1)
else:
posw = pos
if keepdim:
return torch.matmul(posw, t_mtx.t())[...]
else:
return torch.matmul(posw, t_mtx.t())[None, ...]
def get_mv_matrix(elev, azim, camera_distance, center=None):
elev = -elev
azim += 90
elev_rad = math.radians(elev)
azim_rad = math.radians(azim)
camera_position = np.array(
[
camera_distance * math.cos(elev_rad) * math.cos(azim_rad),
camera_distance * math.cos(elev_rad) * math.sin(azim_rad),
camera_distance * math.sin(elev_rad),
]
)
if center is None:
center = np.array([0, 0, 0])
else:
center = np.array(center)
lookat = center - camera_position
lookat = lookat / np.linalg.norm(lookat)
up = np.array([0, 0, 1.0])
right = np.cross(lookat, up)
right = right / np.linalg.norm(right)
up = np.cross(right, lookat)
up = up / np.linalg.norm(up)
c2w = np.concatenate([np.stack([right, up, -lookat], axis=-1), camera_position[:, None]], axis=-1)
w2c = np.zeros((4, 4))
w2c[:3, :3] = np.transpose(c2w[:3, :3], (1, 0))
w2c[:3, 3:] = -np.matmul(np.transpose(c2w[:3, :3], (1, 0)), c2w[:3, 3:])
w2c[3, 3] = 1.0
return w2c.astype(np.float32)
def get_orthographic_projection_matrix(left=-1, right=1, bottom=-1, top=1, near=0, far=2):
"""
计算正交投影矩阵。
参数:
left (float): 投影区域左侧边界。
right (float): 投影区域右侧边界。
bottom (float): 投影区域底部边界。
top (float): 投影区域顶部边界。
near (float): 投影区域近裁剪面距离。
far (float): 投影区域远裁剪面距离。
返回:
numpy.ndarray: 正交投影矩阵。
"""
ortho_matrix = np.eye(4, dtype=np.float32)
ortho_matrix[0, 0] = 2 / (right - left)
ortho_matrix[1, 1] = 2 / (top - bottom)
ortho_matrix[2, 2] = -2 / (far - near)
ortho_matrix[0, 3] = -(right + left) / (right - left)
ortho_matrix[1, 3] = -(top + bottom) / (top - bottom)
ortho_matrix[2, 3] = -(far + near) / (far - near)
return ortho_matrix
def get_perspective_projection_matrix(fovy, aspect_wh, near, far):
fovy_rad = math.radians(fovy)
return np.array(
[
[1.0 / (math.tan(fovy_rad / 2.0) * aspect_wh), 0, 0, 0],
[0, 1.0 / math.tan(fovy_rad / 2.0), 0, 0],
[0, 0, -(far + near) / (far - near), -2.0 * far * near / (far - near)],
[0, 0, -1, 0],
]
).astype(np.float32)
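A minimal usage sketch (not part of the diff) showing how the helpers above compose: build a world-to-camera matrix and an orthographic projection, then project a hypothetical vertex buffer with `transform_pos`. The camera parameters and vertex tensor below are placeholders.

```python
import numpy as np
import torch

# Placeholder camera: 1.5 units from the origin, elevation 20 deg, azimuth 30 deg
w2c = get_mv_matrix(elev=20, azim=30, camera_distance=1.5)
proj = get_orthographic_projection_matrix(left=-1, right=1, bottom=-1, top=1, near=0.1, far=100)
mvp = np.matmul(proj, w2c)  # 4x4 model-view-projection matrix

vertices = torch.rand(100, 3)            # hypothetical (N, 3) vertex positions
clip_pos = transform_pos(mvp, vertices)  # (1, N, 4) homogeneous clip-space coordinates
```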


@@ -0,0 +1 @@
c++ -O3 -Wall -shared -std=c++11 -fPIC `python -m pybind11 --includes` mesh_inpaint_processor.cpp -o mesh_inpaint_processor`python3-config --extension-suffix`


@@ -0,0 +1,395 @@
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <algorithm>
#include <cmath>
#include <queue>
#include <vector>
#include <functional>
namespace py = pybind11;
using namespace std;
namespace {
// Internal data structure that avoids repeated buffer requests and pointer setup
struct MeshData {
int texture_height, texture_width, texture_channel;
int vtx_num;
float* texture_ptr;
uint8_t* mask_ptr;
float* vtx_pos_ptr;
float* vtx_uv_ptr;
int* pos_idx_ptr;
int* uv_idx_ptr;
    // Keep the buffer_info objects alive so the underlying arrays are not freed
py::buffer_info texture_buf, mask_buf, vtx_pos_buf, vtx_uv_buf, pos_idx_buf, uv_idx_buf;
MeshData(py::array_t<float>& texture, py::array_t<uint8_t>& mask,
py::array_t<float>& vtx_pos, py::array_t<float>& vtx_uv,
py::array_t<int>& pos_idx, py::array_t<int>& uv_idx) {
texture_buf = texture.request();
mask_buf = mask.request();
vtx_pos_buf = vtx_pos.request();
vtx_uv_buf = vtx_uv.request();
pos_idx_buf = pos_idx.request();
uv_idx_buf = uv_idx.request();
texture_height = texture_buf.shape[0];
texture_width = texture_buf.shape[1];
texture_channel = texture_buf.shape[2];
texture_ptr = static_cast<float*>(texture_buf.ptr);
mask_ptr = static_cast<uint8_t*>(mask_buf.ptr);
vtx_num = vtx_pos_buf.shape[0];
vtx_pos_ptr = static_cast<float*>(vtx_pos_buf.ptr);
vtx_uv_ptr = static_cast<float*>(vtx_uv_buf.ptr);
pos_idx_ptr = static_cast<int*>(pos_idx_buf.ptr);
uv_idx_ptr = static_cast<int*>(uv_idx_buf.ptr);
}
};
// Shared helper: compute UV pixel coordinates
pair<int, int> calculateUVCoordinates(int vtx_uv_idx, const MeshData& data) {
int uv_v = round(data.vtx_uv_ptr[vtx_uv_idx * 2] * (data.texture_width - 1));
int uv_u = round((1.0 - data.vtx_uv_ptr[vtx_uv_idx * 2 + 1]) * (data.texture_height - 1));
return make_pair(uv_u, uv_v);
}
// Shared helper: compute the distance weight
float calculateDistanceWeight(const array<float, 3>& vtx_0, const array<float, 3>& vtx1) {
float dist_weight = 1.0f / max(
sqrt(
pow(vtx_0[0] - vtx1[0], 2) +
pow(vtx_0[1] - vtx1[1], 2) +
pow(vtx_0[2] - vtx1[2], 2)
), 1E-4);
return dist_weight * dist_weight;
}
// Shared helper: fetch a vertex position
array<float, 3> getVertexPosition(int vtx_idx, const MeshData& data) {
return {data.vtx_pos_ptr[vtx_idx * 3],
data.vtx_pos_ptr[vtx_idx * 3 + 1],
data.vtx_pos_ptr[vtx_idx * 3 + 2]};
}
// Shared helper: build the vertex adjacency graph
void buildGraph(vector<vector<int>>& G, const MeshData& data) {
G.resize(data.vtx_num);
for(int i = 0; i < data.uv_idx_buf.shape[0]; ++i) {
for(int k = 0; k < 3; ++k) {
G[data.pos_idx_ptr[i * 3 + k]].push_back(data.pos_idx_ptr[i * 3 + (k + 1) % 3]);
}
}
}
// Generic initialization, handling both mask types (float and int)
template<typename MaskType>
void initializeVertexDataGeneric(const MeshData& data, vector<MaskType>& vtx_mask,
vector<vector<float>>& vtx_color, vector<int>* uncolored_vtxs = nullptr,
MaskType mask_value = static_cast<MaskType>(1)) {
vtx_mask.assign(data.vtx_num, static_cast<MaskType>(0));
vtx_color.assign(data.vtx_num, vector<float>(data.texture_channel, 0.0f));
if(uncolored_vtxs) {
uncolored_vtxs->clear();
}
for(int i = 0; i < data.uv_idx_buf.shape[0]; ++i) {
for(int k = 0; k < 3; ++k) {
int vtx_uv_idx = data.uv_idx_ptr[i * 3 + k];
int vtx_idx = data.pos_idx_ptr[i * 3 + k];
auto uv_coords = calculateUVCoordinates(vtx_uv_idx, data);
if(data.mask_ptr[uv_coords.first * data.texture_width + uv_coords.second] > 0) {
vtx_mask[vtx_idx] = mask_value;
for(int c = 0; c < data.texture_channel; ++c) {
vtx_color[vtx_idx][c] = data.texture_ptr[(uv_coords.first * data.texture_width +
uv_coords.second) * data.texture_channel + c];
}
} else if(uncolored_vtxs) {
uncolored_vtxs->push_back(vtx_idx);
}
}
}
}
// Generic smoothing algorithm: supports different mask types and predicate functions
template<typename MaskType>
void performSmoothingAlgorithm(const MeshData& data, const vector<vector<int>>& G,
vector<MaskType>& vtx_mask, vector<vector<float>>& vtx_color,
const vector<int>& uncolored_vtxs,
function<bool(MaskType)> is_colored_func,
function<void(MaskType&)> set_colored_func) {
int smooth_count = 2;
int last_uncolored_vtx_count = 0;
while(smooth_count > 0) {
int uncolored_vtx_count = 0;
for(int vtx_idx : uncolored_vtxs) {
vector<float> sum_color(data.texture_channel, 0.0f);
float total_weight = 0.0f;
array<float, 3> vtx_0 = getVertexPosition(vtx_idx, data);
for(int connected_idx : G[vtx_idx]) {
if(is_colored_func(vtx_mask[connected_idx])) {
array<float, 3> vtx1 = getVertexPosition(connected_idx, data);
float dist_weight = calculateDistanceWeight(vtx_0, vtx1);
for(int c = 0; c < data.texture_channel; ++c) {
sum_color[c] += vtx_color[connected_idx][c] * dist_weight;
}
total_weight += dist_weight;
}
}
if(total_weight > 0.0f) {
for(int c = 0; c < data.texture_channel; ++c) {
vtx_color[vtx_idx][c] = sum_color[c] / total_weight;
}
set_colored_func(vtx_mask[vtx_idx]);
} else {
uncolored_vtx_count++;
}
}
if(last_uncolored_vtx_count == uncolored_vtx_count) {
smooth_count--;
} else {
smooth_count++;
}
last_uncolored_vtx_count = uncolored_vtx_count;
}
}
// Generic implementation of the forward-propagation algorithm
void performForwardPropagation(const MeshData& data, const vector<vector<int>>& G,
vector<float>& vtx_mask, vector<vector<float>>& vtx_color,
queue<int>& active_vtxs) {
while(!active_vtxs.empty()) {
queue<int> pending_active_vtxs;
while(!active_vtxs.empty()) {
int vtx_idx = active_vtxs.front();
active_vtxs.pop();
array<float, 3> vtx_0 = getVertexPosition(vtx_idx, data);
for(int connected_idx : G[vtx_idx]) {
if(vtx_mask[connected_idx] > 0) continue;
array<float, 3> vtx1 = getVertexPosition(connected_idx, data);
float dist_weight = calculateDistanceWeight(vtx_0, vtx1);
for(int c = 0; c < data.texture_channel; ++c) {
vtx_color[connected_idx][c] += vtx_color[vtx_idx][c] * dist_weight;
}
if(vtx_mask[connected_idx] == 0) {
pending_active_vtxs.push(connected_idx);
}
vtx_mask[connected_idx] -= dist_weight;
}
}
while(!pending_active_vtxs.empty()) {
int vtx_idx = pending_active_vtxs.front();
pending_active_vtxs.pop();
for(int c = 0; c < data.texture_channel; ++c) {
vtx_color[vtx_idx][c] /= -vtx_mask[vtx_idx];
}
vtx_mask[vtx_idx] = 1.0f;
active_vtxs.push(vtx_idx);
}
}
}
// Shared helper: create the output texture and mask arrays
pair<py::array_t<float>, py::array_t<uint8_t>> createOutputArrays(
const MeshData& data, const vector<float>& vtx_mask,
const vector<vector<float>>& vtx_color) {
py::array_t<float> new_texture(data.texture_buf.size);
py::array_t<uint8_t> new_mask(data.mask_buf.size);
auto new_texture_buf = new_texture.request();
auto new_mask_buf = new_mask.request();
float* new_texture_ptr = static_cast<float*>(new_texture_buf.ptr);
uint8_t* new_mask_ptr = static_cast<uint8_t*>(new_mask_buf.ptr);
// Copy original texture and mask to new arrays
copy(data.texture_ptr, data.texture_ptr + data.texture_buf.size, new_texture_ptr);
copy(data.mask_ptr, data.mask_ptr + data.mask_buf.size, new_mask_ptr);
for(int face_idx = 0; face_idx < data.uv_idx_buf.shape[0]; ++face_idx) {
for(int k = 0; k < 3; ++k) {
int vtx_uv_idx = data.uv_idx_ptr[face_idx * 3 + k];
int vtx_idx = data.pos_idx_ptr[face_idx * 3 + k];
if(vtx_mask[vtx_idx] == 1.0f) {
auto uv_coords = calculateUVCoordinates(vtx_uv_idx, data);
for(int c = 0; c < data.texture_channel; ++c) {
new_texture_ptr[
(uv_coords.first * data.texture_width + uv_coords.second) *
data.texture_channel + c
] = vtx_color[vtx_idx][c];
}
new_mask_ptr[uv_coords.first * data.texture_width + uv_coords.second] = 255;
}
}
}
// Reshape the new arrays to match the original texture and mask shapes
new_texture.resize({data.texture_height, data.texture_width, 3});
new_mask.resize({data.texture_height, data.texture_width});
return make_pair(new_texture, new_mask);
}
// Dedicated helper: create the vertex-color output arrays
pair<py::array_t<float>, py::array_t<uint8_t>> createVertexColorOutput(
const MeshData& data, const vector<int>& vtx_mask,
const vector<vector<float>>& vtx_color) {
py::array_t<float> py_vtx_color({data.vtx_num, data.texture_channel});
py::array_t<uint8_t> py_vtx_mask({data.vtx_num});
auto py_vtx_color_buf = py_vtx_color.request();
auto py_vtx_mask_buf = py_vtx_mask.request();
float* py_vtx_color_ptr = static_cast<float*>(py_vtx_color_buf.ptr);
uint8_t* py_vtx_mask_ptr = static_cast<uint8_t*>(py_vtx_mask_buf.ptr);
for(int i = 0; i < data.vtx_num; ++i) {
py_vtx_mask_ptr[i] = vtx_mask[i];
for(int c = 0; c < data.texture_channel; ++c) {
py_vtx_color_ptr[i * data.texture_channel + c] = vtx_color[i][c];
}
}
return make_pair(py_vtx_color, py_vtx_mask);
}
} // anonymous namespace
// Refactored meshVerticeInpaint_smooth
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeInpaint_smooth(
py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
py::array_t<int> pos_idx, py::array_t<int> uv_idx) {
MeshData data(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
vector<float> vtx_mask;
vector<vector<float>> vtx_color;
vector<int> uncolored_vtxs;
vector<vector<int>> G;
initializeVertexDataGeneric(data, vtx_mask, vtx_color, &uncolored_vtxs, 1.0f);
buildGraph(G, data);
    // Use the generic smoothing algorithm
    performSmoothingAlgorithm<float>(data, G, vtx_mask, vtx_color, uncolored_vtxs,
        [](float mask_val) { return mask_val > 0; },  // predicate: vertex already colored?
        [](float& mask_val) { mask_val = 1.0f; }      // action: mark vertex as colored
);
return createOutputArrays(data, vtx_mask, vtx_color);
}
// Refactored meshVerticeInpaint_forward
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeInpaint_forward(
py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
py::array_t<int> pos_idx, py::array_t<int> uv_idx) {
MeshData data(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
vector<float> vtx_mask;
vector<vector<float>> vtx_color;
vector<vector<int>> G;
queue<int> active_vtxs;
    // Generic initialization (uncolored_vtxs is not needed here)
initializeVertexDataGeneric(data, vtx_mask, vtx_color, nullptr, 1.0f);
buildGraph(G, data);
    // Collect the active (already colored) vertices
for(int i = 0; i < data.vtx_num; ++i) {
if(vtx_mask[i] == 1.0f) {
active_vtxs.push(i);
}
}
    // Run the generic forward-propagation algorithm
performForwardPropagation(data, G, vtx_mask, vtx_color, active_vtxs);
return createOutputArrays(data, vtx_mask, vtx_color);
}
// Main interface function
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeInpaint(
py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
py::array_t<int> pos_idx, py::array_t<int> uv_idx, const string& method = "smooth") {
if(method == "smooth") {
return meshVerticeInpaint_smooth(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
} else if(method == "forward") {
return meshVerticeInpaint_forward(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
} else {
throw invalid_argument("Invalid method. Use 'smooth' or 'forward'.");
}
}
//============================
// Refactored meshVerticeColor_smooth
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeColor_smooth(
py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
py::array_t<int> pos_idx, py::array_t<int> uv_idx) {
MeshData data(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
vector<int> vtx_mask;
vector<vector<float>> vtx_color;
vector<int> uncolored_vtxs;
vector<vector<int>> G;
initializeVertexDataGeneric(data, vtx_mask, vtx_color, &uncolored_vtxs, 1);
buildGraph(G, data);
    // Use the generic smoothing algorithm
    performSmoothingAlgorithm<int>(data, G, vtx_mask, vtx_color, uncolored_vtxs,
        [](int mask_val) { return mask_val > 0; },  // predicate: vertex already colored?
        [](int& mask_val) { mask_val = 2; }         // action: mark vertex as colored (value 2)
);
return createVertexColorOutput(data, vtx_mask, vtx_color);
}
// Main interface for meshVerticeColor
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeColor(
py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
py::array_t<int> pos_idx, py::array_t<int> uv_idx, const string& method = "smooth") {
if(method == "smooth") {
return meshVerticeColor_smooth(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
} else {
throw invalid_argument("Invalid method. Use 'smooth' or 'forward'.");
}
}
// Python bindings
PYBIND11_MODULE(mesh_inpaint_processor, m) {
m.def("meshVerticeInpaint", &meshVerticeInpaint, "A function to process mesh",
py::arg("texture"), py::arg("mask"), py::arg("vtx_pos"), py::arg("vtx_uv"),
py::arg("pos_idx"), py::arg("uv_idx"), py::arg("method") = "smooth");
m.def("meshVerticeColor", &meshVerticeColor, "A function to process mesh",
py::arg("texture"), py::arg("mask"), py::arg("vtx_pos"), py::arg("vtx_uv"),
py::arg("pos_idx"), py::arg("uv_idx"), py::arg("method") = "smooth");
}
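A hedged usage sketch for the compiled module (module name taken from the `PYBIND11_MODULE` declaration above and the build command earlier). The array shapes are inferred from `MeshData`: texture H×W×C float32, mask H×W uint8, vertex positions N×3, UVs N×2, face/UV indices F×3 int32; the zero-filled arrays are placeholders only.

```python
import numpy as np
import mesh_inpaint_processor

texture = np.zeros((512, 512, 3), dtype=np.float32)  # H x W x C texture
mask = np.zeros((512, 512), dtype=np.uint8)          # H x W validity mask
vtx_pos = np.zeros((4, 3), dtype=np.float32)         # N x 3 vertex positions
vtx_uv = np.zeros((4, 2), dtype=np.float32)          # N x 2 UV coordinates
pos_idx = np.zeros((2, 3), dtype=np.int32)           # F x 3 face indices
uv_idx = np.zeros((2, 3), dtype=np.int32)            # F x 3 UV indices

new_texture, new_mask = mesh_inpaint_processor.meshVerticeInpaint(
    texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx, method="smooth"
)
```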


@@ -0,0 +1,284 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import os
import cv2
import bpy
import math
import numpy as np
from io import StringIO
from typing import Optional, Tuple, Dict, Any
def _safe_extract_attribute(obj: Any, attr_path: str, default: Any = None) -> Any:
"""Extract nested attribute safely from object."""
try:
for attr in attr_path.split("."):
obj = getattr(obj, attr)
return obj
except AttributeError:
return default
def _convert_to_numpy(data: Any, dtype: np.dtype) -> Optional[np.ndarray]:
"""Convert data to numpy array with specified dtype, handling None values."""
if data is None:
return None
return np.asarray(data, dtype=dtype)
def load_mesh(mesh):
"""Load mesh data including vertices, faces, UV coordinates and texture."""
# Extract vertex positions and face indices
vtx_pos = _safe_extract_attribute(mesh, "vertices")
pos_idx = _safe_extract_attribute(mesh, "faces")
# Extract UV coordinates (reusing face indices for UV indices)
vtx_uv = _safe_extract_attribute(mesh, "visual.uv")
uv_idx = pos_idx # Reuse face indices for UV mapping
# Convert to numpy arrays with appropriate dtypes
vtx_pos = _convert_to_numpy(vtx_pos, np.float32)
pos_idx = _convert_to_numpy(pos_idx, np.int32)
vtx_uv = _convert_to_numpy(vtx_uv, np.float32)
uv_idx = _convert_to_numpy(uv_idx, np.int32)
texture_data = None
return vtx_pos, pos_idx, vtx_uv, uv_idx, texture_data
def _get_base_path_and_name(mesh_path: str) -> Tuple[str, str]:
"""Get base path without extension and mesh name."""
base_path = os.path.splitext(mesh_path)[0]
name = os.path.basename(base_path)
return base_path, name
def _save_texture_map(
texture: np.ndarray,
base_path: str,
suffix: str = "",
image_format: str = ".jpg",
color_convert: Optional[int] = None,
) -> str:
"""Save texture map with optional color conversion."""
path = f"{base_path}{suffix}{image_format}"
processed_texture = (texture * 255).astype(np.uint8)
if color_convert is not None:
processed_texture = cv2.cvtColor(processed_texture, color_convert)
cv2.imwrite(path, processed_texture)
else:
cv2.imwrite(path, processed_texture[..., ::-1]) # RGB to BGR
return os.path.basename(path)
def _write_mtl_properties(f, properties: Dict[str, Any]):
"""Write material properties to MTL file."""
for key, value in properties.items():
if isinstance(value, (list, tuple)):
f.write(f"{key} {' '.join(map(str, value))}\n")
else:
f.write(f"{key} {value}\n")
def _create_obj_content(
vtx_pos: np.ndarray, vtx_uv: np.ndarray, pos_idx: np.ndarray, uv_idx: np.ndarray, name: str
) -> str:
"""Create OBJ file content."""
buffer = StringIO()
# Write header and vertices
buffer.write(f"mtllib {name}.mtl\no {name}\n")
np.savetxt(buffer, vtx_pos, fmt="v %.6f %.6f %.6f")
np.savetxt(buffer, vtx_uv, fmt="vt %.6f %.6f")
buffer.write("s 0\nusemtl Material\n")
# Write faces
pos_idx_plus1 = pos_idx + 1
uv_idx_plus1 = uv_idx + 1
face_format = np.frompyfunc(lambda *x: f"{int(x[0])}/{int(x[1])}", 2, 1)
faces = face_format(pos_idx_plus1, uv_idx_plus1)
face_strings = [f"f {' '.join(face)}" for face in faces]
buffer.write("\n".join(face_strings) + "\n")
return buffer.getvalue()
def save_obj_mesh(mesh_path, vtx_pos, pos_idx, vtx_uv, uv_idx, texture, metallic=None, roughness=None, normal=None):
"""Save mesh as OBJ file with textures and material."""
# Convert inputs to numpy arrays
vtx_pos = _convert_to_numpy(vtx_pos, np.float32)
vtx_uv = _convert_to_numpy(vtx_uv, np.float32)
pos_idx = _convert_to_numpy(pos_idx, np.int32)
uv_idx = _convert_to_numpy(uv_idx, np.int32)
base_path, name = _get_base_path_and_name(mesh_path)
# Create and save OBJ content
obj_content = _create_obj_content(vtx_pos, vtx_uv, pos_idx, uv_idx, name)
with open(mesh_path, "w") as obj_file:
obj_file.write(obj_content)
# Save texture maps
texture_maps = {}
texture_maps["diffuse"] = _save_texture_map(texture, base_path)
if metallic is not None:
texture_maps["metallic"] = _save_texture_map(metallic, base_path, "_metallic", color_convert=cv2.COLOR_RGB2GRAY)
if roughness is not None:
texture_maps["roughness"] = _save_texture_map(
roughness, base_path, "_roughness", color_convert=cv2.COLOR_RGB2GRAY
)
if normal is not None:
texture_maps["normal"] = _save_texture_map(normal, base_path, "_normal")
# Create MTL file
_create_mtl_file(base_path, texture_maps, metallic is not None)
def _create_mtl_file(base_path: str, texture_maps: Dict[str, str], is_pbr: bool):
"""Create MTL material file."""
mtl_path = f"{base_path}.mtl"
with open(mtl_path, "w") as f:
f.write("newmtl Material\n")
if is_pbr:
# PBR material properties
properties = {
"Kd": [0.800, 0.800, 0.800],
"Ke": [0.000, 0.000, 0.000], # 鐜鍏夐伄钄<E4BC84>
"Ni": 1.500, # 鎶樺皠绯绘暟
"d": 1.0, # 閫忔槑搴<E6A791>
"illum": 2, # 鍏夌収妯″瀷
"map_Kd": texture_maps["diffuse"],
}
_write_mtl_properties(f, properties)
# Additional PBR maps
map_configs = [("metallic", "map_Pm"), ("roughness", "map_Pr"), ("normal", "map_Bump -bm 1.0")]
for texture_key, mtl_key in map_configs:
if texture_key in texture_maps:
f.write(f"{mtl_key} {texture_maps[texture_key]}\n")
else:
# Standard material properties
properties = {
"Ns": 250.000000,
"Ka": [0.200, 0.200, 0.200],
"Kd": [0.800, 0.800, 0.800],
"Ks": [0.500, 0.500, 0.500],
"Ke": [0.000, 0.000, 0.000],
"Ni": 1.500,
"d": 1.0,
"illum": 3,
"map_Kd": texture_maps["diffuse"],
}
_write_mtl_properties(f, properties)
def save_mesh(mesh_path, vtx_pos, pos_idx, vtx_uv, uv_idx, texture, metallic=None, roughness=None, normal=None):
"""Save mesh using OBJ format."""
save_obj_mesh(
mesh_path, vtx_pos, pos_idx, vtx_uv, uv_idx, texture, metallic=metallic, roughness=roughness, normal=normal
)
def _setup_blender_scene():
"""Setup Blender scene for conversion."""
if "convert" not in bpy.data.scenes:
bpy.data.scenes.new("convert")
bpy.context.window.scene = bpy.data.scenes["convert"]
def _clear_scene_objects():
"""Clear all objects from current Blender scene."""
for obj in bpy.context.scene.objects:
obj.select_set(True)
bpy.data.objects.remove(obj, do_unlink=True)
def _select_mesh_objects():
"""Select all mesh objects in scene."""
bpy.ops.object.select_all(action="DESELECT")
for obj in bpy.context.scene.objects:
if obj.type == "MESH":
obj.select_set(True)
def _merge_vertices_if_needed(merge_vertices: bool):
"""Merge duplicate vertices if requested."""
if not merge_vertices:
return
for obj in bpy.context.selected_objects:
if obj.type == "MESH":
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.select_all(action="SELECT")
bpy.ops.mesh.remove_doubles()
bpy.ops.object.mode_set(mode="OBJECT")
def _apply_shading(shade_type: str, auto_smooth_angle: float):
"""Apply shading to selected objects."""
shading_ops = {
"SMOOTH": lambda: bpy.ops.object.shade_smooth(),
"FLAT": lambda: bpy.ops.object.shade_flat(),
"AUTO_SMOOTH": lambda: _apply_auto_smooth(auto_smooth_angle),
}
if shade_type in shading_ops:
shading_ops[shade_type]()
def _apply_auto_smooth(auto_smooth_angle: float):
"""Apply auto smooth based on Blender version."""
angle_rad = math.radians(auto_smooth_angle)
if bpy.app.version < (4, 1, 0):
bpy.ops.object.shade_smooth(use_auto_smooth=True, auto_smooth_angle=angle_rad)
elif bpy.app.version < (4, 2, 0):
bpy.ops.object.shade_smooth_by_angle(angle=angle_rad)
else:
bpy.ops.object.shade_auto_smooth(angle=angle_rad)
def convert_obj_to_glb(
obj_path: str,
glb_path: str,
shade_type: str = "SMOOTH",
auto_smooth_angle: float = 60,
merge_vertices: bool = False,
) -> bool:
"""Convert OBJ file to GLB format using Blender."""
try:
_setup_blender_scene()
_clear_scene_objects()
# Import OBJ file
bpy.ops.wm.obj_import(filepath=obj_path)
_select_mesh_objects()
# Process meshes
_merge_vertices_if_needed(merge_vertices)
_apply_shading(shade_type, auto_smooth_angle)
# Export to GLB
bpy.ops.export_scene.gltf(filepath=glb_path, use_active_scene=True)
return True
except Exception:
return False
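A short usage sketch for the converter above (placeholder paths; requires a Python environment with the `bpy` module, e.g. Blender's bundled interpreter):

```python
# Convert an OBJ produced by save_mesh into a GLB with auto-smooth shading.
ok = convert_obj_to_glb(
    "textured_mesh.obj",   # placeholder input path
    "textured_mesh.glb",   # placeholder output path
    shade_type="AUTO_SMOOTH",
    auto_smooth_angle=60,
    merge_vertices=True,
)
print("conversion succeeded" if ok else "conversion failed")
```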

hy3dpaint/LICENSE Normal file (81 lines)

@@ -0,0 +1,81 @@
TENCENT HUNYUAN 3D 2.1 COMMUNITY LICENSE AGREEMENT
Tencent Hunyuan 3D 2.1 Release Date: June 13, 2025
THIS LICENSE AGREEMENT DOES NOT APPLY IN THE EUROPEAN UNION, UNITED KINGDOM AND SOUTH KOREA AND IS EXPRESSLY LIMITED TO THE TERRITORY, AS DEFINED BELOW.
By clicking to agree or by using, reproducing, modifying, distributing, performing or displaying any portion or element of the Tencent Hunyuan 3D 2.1 Works, including via any Hosted Service, You will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
1. DEFINITIONS.
a. “Acceptable Use Policy” shall mean the policy made available by Tencent as set forth in the Exhibit A.
b. “Agreement” shall mean the terms and conditions for use, reproduction, distribution, modification, performance and displaying of Tencent Hunyuan 3D 2.1 Works or any portion or element thereof set forth herein.
c. “Documentation” shall mean the specifications, manuals and documentation for Tencent Hunyuan 3D 2.1 made publicly available by Tencent.
d. “Hosted Service” shall mean a hosted service offered via an application programming interface (API), web access, or any other electronic or remote means.
e. “Licensee,” “You” or “Your” shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Tencent Hunyuan 3D 2.1 Works for any purpose and in any field of use.
f. “Materials” shall mean, collectively, Tencent's proprietary Tencent Hunyuan 3D 2.1 and Documentation (and any portion thereof) as made available by Tencent under this Agreement.
g. “Model Derivatives” shall mean all: (i) modifications to Tencent Hunyuan 3D 2.1 or any Model Derivative of Tencent Hunyuan 3D 2.1; (ii) works based on Tencent Hunyuan 3D 2.1 or any Model Derivative of Tencent Hunyuan 3D 2.1; or (iii) any other machine learning model which is created by transfer of patterns of the weights, parameters, operations, or Output of Tencent Hunyuan 3D 2.1 or any Model Derivative of Tencent Hunyuan 3D 2.1, to that model in order to cause that model to perform similarly to Tencent Hunyuan 3D 2.1 or a Model Derivative of Tencent Hunyuan 3D 2.1, including distillation methods, methods that use intermediate data representations, or methods based on the generation of synthetic data Outputs by Tencent Hunyuan 3D 2.1 or a Model Derivative of Tencent Hunyuan 3D 2.1 for training that model. For clarity, Outputs by themselves are not deemed Model Derivatives.
h. “Output” shall mean the information and/or content output of Tencent Hunyuan 3D 2.1 or a Model Derivative that results from operating or otherwise using Tencent Hunyuan 3D 2.1 or a Model Derivative, including via a Hosted Service.
i. “Tencent,” “We” or “Us” shall mean THL Q Limited.
j. “Tencent Hunyuan 3D 2.1” shall mean the 3D generation models and their software and algorithms, including trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing made publicly available by Us at [ https://github.com/Tencent-Hunyuan/Hunyuan3D-2.1].
k. “Tencent Hunyuan 3D 2.1 Works” shall mean: (i) the Materials; (ii) Model Derivatives; and (iii) all derivative works thereof.
l. “Territory” shall mean the worldwide territory, excluding the territory of the European Union, United Kingdom and South Korea.
m. “Third Party” or “Third Parties” shall mean individuals or legal entities that are not under common control with Us or You.
n. “including” shall mean including but not limited to.
2. GRANT OF RIGHTS.
We grant You, for the Territory only, a non-exclusive, non-transferable and royalty-free limited license under Tencents intellectual property or other rights owned by Us embodied in or utilized by the Materials to use, reproduce, distribute, create derivative works of (including Model Derivatives), and make modifications to the Materials, only in accordance with the terms of this Agreement and the Acceptable Use Policy, and You must not violate (or encourage or permit anyone else to violate) any term of this Agreement or the Acceptable Use Policy.
3. DISTRIBUTION.
You may, subject to Your compliance with this Agreement, distribute or make available to Third Parties the Tencent Hunyuan 3D 2.1 Works, exclusively in the Territory, provided that You meet all of the following conditions:
a. You must provide all such Third Party recipients of the Tencent Hunyuan 3D 2.1 Works or products or services using them a copy of this Agreement;
b. You must cause any modified files to carry prominent notices stating that You changed the files;
c. You are encouraged to: (i) publish at least one technology introduction blogpost or one public statement expressing Your experience of using the Tencent Hunyuan 3D 2.1 Works; and (ii) mark the products or services developed by using the Tencent Hunyuan 3D 2.1 Works to indicate that the product/service is “Powered by Tencent Hunyuan”; and
d. All distributions to Third Parties (other than through a Hosted Service) must be accompanied by a “Notice” text file that contains the following notice: “Tencent Hunyuan 3D 2.1 is licensed under the Tencent Hunyuan 3D 2.1 Community License Agreement, Copyright © 2025 Tencent. All Rights Reserved. The trademark rights of “Tencent Hunyuan” are owned by Tencent or its affiliate.”
You may add Your own copyright statement to Your modifications and, except as set forth in this Section and in Section 5, may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Model Derivatives as a whole, provided Your use, reproduction, modification, distribution, performance and display of the work otherwise complies with the terms and conditions of this Agreement (including as regards the Territory). If You receive Tencent Hunyuan 3D 2.1 Works from a Licensee as part of an integrated end user product, then this Section 3 of this Agreement will not apply to You.
4. ADDITIONAL COMMERCIAL TERMS.
If, on the Tencent Hunyuan 3D 2.1 version release date, the monthly active users of all products or services made available by or for Licensee is greater than 1 million monthly active users in the preceding calendar month, You must request a license from Tencent, which Tencent may grant to You in its sole discretion, and You are not authorized to exercise any of the rights under this Agreement unless or until Tencent otherwise expressly grants You such rights.
Subject to Tencent's written approval, you may request a license for the use of Tencent Hunyuan 3D 2.1 by submitting the following information to hunyuan3d@tencent.com:
a. Your company's name and associated business sector that plans to use Tencent Hunyuan 3D 2.1.
b. Your intended use case and the purpose of using Tencent Hunyuan 3D 2.1.
c. Your plans to modify Tencent Hunyuan 3D 2.1 or create Model Derivatives.
5. RULES OF USE.
a. Your use of the Tencent Hunyuan 3D 2.1 Works must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Tencent Hunyuan 3D 2.1 Works, which is hereby incorporated by reference into this Agreement. You must include the use restrictions referenced in these Sections 5(a) and 5(b) as an enforceable provision in any agreement (e.g., license agreement, terms of use, etc.) governing the use and/or distribution of Tencent Hunyuan 3D 2.1 Works and You must provide notice to subsequent users to whom You distribute that Tencent Hunyuan 3D 2.1 Works are subject to the use restrictions in these Sections 5(a) and 5(b).
b. You must not use the Tencent Hunyuan 3D 2.1 Works or any Output or results of the Tencent Hunyuan 3D 2.1 Works to improve any other AI model (other than Tencent Hunyuan 3D 2.1 or Model Derivatives thereof).
c. You must not use, reproduce, modify, distribute, or display the Tencent Hunyuan 3D 2.1 Works, Output or results of the Tencent Hunyuan 3D 2.1 Works outside the Territory. Any such use outside the Territory is unlicensed and unauthorized under this Agreement.
6. INTELLECTUAL PROPERTY.
a. Subject to Tencent's ownership of Tencent Hunyuan 3D 2.1 Works made by or for Tencent and intellectual property rights therein, conditioned upon Your compliance with the terms and conditions of this Agreement, as between You and Tencent, You will be the owner of any derivative works and modifications of the Materials and any Model Derivatives that are made by or for You.
b. No trademark licenses are granted under this Agreement, and in connection with the Tencent Hunyuan 3D 2.1 Works, Licensee may not use any name or mark owned by or associated with Tencent or any of its affiliates, except as required for reasonable and customary use in describing and distributing the Tencent Hunyuan 3D 2.1 Works. Tencent hereby grants You a license to use “Tencent Hunyuan” (the “Mark”) in the Territory solely as required to comply with the provisions of Section 3(c), provided that You comply with any applicable laws related to trademark protection. All goodwill arising out of Your use of the Mark will inure to the benefit of Tencent.
c. If You commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against Us or any person or entity alleging that the Materials or any Output, or any portion of any of the foregoing, infringe any intellectual property or other right owned or licensable by You, then all licenses granted to You under this Agreement shall terminate as of the date such lawsuit or other proceeding is filed. You will defend, indemnify and hold harmless Us from and against any claim by any Third Party arising out of or related to Your or the Third Partys use or distribution of the Tencent Hunyuan 3D 2.1 Works.
d. Tencent claims no rights in Outputs You generate. You and Your users are solely responsible for Outputs and their subsequent uses.
7. DISCLAIMERS OF WARRANTY AND LIMITATIONS OF LIABILITY.
a. We are not obligated to support, update, provide training for, or develop any further version of the Tencent Hunyuan 3D 2.1 Works or to grant any license thereto.
b. UNLESS AND ONLY TO THE EXTENT REQUIRED BY APPLICABLE LAW, THE TENCENT HUNYUAN 3D 2.1 WORKS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED “AS IS” WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES OF ANY KIND INCLUDING ANY WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, COURSE OF DEALING, USAGE OF TRADE, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING, REPRODUCING, MODIFYING, PERFORMING, DISPLAYING OR DISTRIBUTING ANY OF THE TENCENT HUNYUAN 3D 2.1 WORKS OR OUTPUTS AND ASSUME ANY AND ALL RISKS ASSOCIATED WITH YOUR OR A THIRD PARTYS USE OR DISTRIBUTION OF ANY OF THE TENCENT HUNYUAN 3D 2.1 WORKS OR OUTPUTS AND YOUR EXERCISE OF RIGHTS AND PERMISSIONS UNDER THIS AGREEMENT.
c. TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL TENCENT OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, FOR ANY DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR LOST PROFITS OF ANY KIND ARISING FROM THIS AGREEMENT OR RELATED TO ANY OF THE TENCENT HUNYUAN 3D 2.1 WORKS OR OUTPUTS, EVEN IF TENCENT OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
8. SURVIVAL AND TERMINATION.
a. The term of this Agreement shall commence upon Your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
b. We may terminate this Agreement if You breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, You must promptly delete and cease use of the Tencent Hunyuan 3D 2.1 Works. Sections 6(a), 6(c), 7 and 9 shall survive the termination of this Agreement.
9. GOVERNING LAW AND JURISDICTION.
a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of the Hong Kong Special Administrative Region of the People's Republic of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
b. Exclusive jurisdiction and venue for any dispute arising out of or relating to this Agreement will be a court of competent jurisdiction in the Hong Kong Special Administrative Region of the People's Republic of China, and Tencent and Licensee consent to the exclusive jurisdiction of such court with respect to any such dispute.
EXHIBIT A
ACCEPTABLE USE POLICY
Tencent reserves the right to update this Acceptable Use Policy from time to time.
Last modified: November 5, 2024
Tencent endeavors to promote safe and fair use of its tools and features, including Tencent Hunyuan 3D 2.1. You agree not to use Tencent Hunyuan 3D 2.1 or Model Derivatives:
1. Outside the Territory;
2. In any way that violates any applicable national, federal, state, local, international or any other law or regulation;
3. To harm Yourself or others;
4. To repurpose or distribute output from Tencent Hunyuan 3D 2.1 or any Model Derivatives to harm Yourself or others;
5. To override or circumvent the safety guardrails and safeguards We have put in place;
6. For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
7. To generate or disseminate verifiably false information and/or content with the purpose of harming others or influencing elections;
8. To generate or facilitate false online engagement, including fake reviews and other means of fake online engagement;
9. To intentionally defame, disparage or otherwise harass others;
10. To generate and/or disseminate malware (including ransomware) or any other content to be used for the purpose of harming electronic systems;
11. To generate or disseminate personal identifiable information with the purpose of harming others;
12. To generate or disseminate information (including images, code, posts, articles), and place the information in any public context (including through the use of bot generated tweets), without expressly and conspicuously identifying that the information and/or content is machine generated;
13. To impersonate another individual without consent, authorization, or legal right;
14. To make high-stakes automated decisions in domains that affect an individual's safety, rights or wellbeing (e.g., law enforcement, migration, medicine/health, management of critical infrastructure, safety components of products, essential services, credit, employment, housing, education, social scoring, or insurance);
15. In a manner that violates or disrespects the social ethics and moral standards of other countries or regions;
16. To perform, facilitate, threaten, incite, plan, promote or encourage violent extremism or terrorism;
17. For any use intended to discriminate against or harm individuals or groups based on protected characteristics or categories, online or offline social behavior or known or predicted personal or personality characteristics;
18. To intentionally exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
19. For military purposes;
20. To engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or other professional practices.

hy3dpaint/README.md Normal file (96 lines)

@@ -0,0 +1,96 @@
# Hunyuan3D-Paint 2.1
Hunyuan3D-Paint 2.1 is a high-quality PBR texture generation model for 3D meshes, powered by [RomanTex](https://github.com/oakshy/RomanTex) and [MaterialMVP](https://github.com/ZebinHe/MaterialMVP/).
## Quick Inference
You need to manually download the RealESRGAN weights to the ckpt folder using the following command:
```bash
wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P ckpt
```
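If `wget` is unavailable, a Python equivalent of the command above (same URL and destination directory) is:

```python
import os
import urllib.request

url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth"
os.makedirs("ckpt", exist_ok=True)
urllib.request.urlretrieve(url, os.path.join("ckpt", "RealESRGAN_x4plus.pth"))
```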
Given a 3D mesh `mesh.glb` and a reference image `image.png`, you can run inference with the following command. The result will be saved as `textured_mesh.glb`.
```bash
python3 demo.py
```
**Optional arguments in `demo.py`:**
- `max_num_view` : Maximum number of views, adaptively selected by the model (integer between 6 and 9)
- `resolution` : Resolution for generated PBR textures (512 or 768)
**Memory Recommendation:** For `max_num_view=6` and `resolution=512`, we recommend using a GPU with at least **21GB VRAM**.
## Training
### Data Preparation
We provide one sample training example in `train_examples` for an overfitting training test. The data should be organized as follows:
```
train_examples/
├── examples.json
└── 001/
├── render_tex/ # Rendered generated PBR images
│ ├── 000.png # Rendered views (RGB images)
│ ├── 000_albedo.png # Albedo maps for each view
│ ├── 000_mr.png # Metallic-Roughness maps for each view, R and G channels
│   ├── 000_normal.png   # Normal maps
│   ├── 000_pos.png      # Position maps
│ ├── 001.png # Additional views...
│ ├── 001_albedo.png
│ ├── 001_mr.png
│ ├── 001_normal.png
│ ├── 001_pos.png
│ └── ... # More views (002, 003, 004, 005, ...)
└── render_cond/ # Rendered reference images (at least two light conditions should be rendered to facilitate consistency loss)
├── 000_light_AL.png # Light condition 1 (Area Light)
├── 000_light_ENVMAP.png # Light condition 2 (Environment map)
├── 000_light_PL.png # Light condition 3 (Point lighting)
├── 001_light_AL.png
├── 001_light_ENVMAP.png
├── 001_light_PL.png
└── ... # More lighting conditions (002-005, ...)
```
Each training example contains the following (a file-enumeration sketch follows this list):
- **render_tex/**: Multi-view renderings with PBR material properties
- Main RGB images (`XXX.png`)
- Albedo maps (`XXX_albedo.png`)
- Metallic-Roughness maps (`XXX_mr.png`)
- Normal maps (`XXX_normal.png/jpg`)
- Position maps (`XXX_pos.png/jpg`)
- Camera transforms (`transforms.json`)
- **render_cond/**: Lighting condition maps for each view
- Ambient lighting (`XXX_light_AL.png`)
- Environment map lighting (`XXX_light_ENVMAP.png`)
- Point lighting (`XXX_light_PL.png`)
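The sketch below enumerates the files expected for a single view of one training example, assuming the layout above with zero-padded view indices (a hypothetical helper, not part of the training code):

```python
import os

def expected_view_files(example_dir, view_idx):
    """Return the per-view file paths described in the layout above (hypothetical helper)."""
    v = f"{view_idx:03d}"
    tex = os.path.join(example_dir, "render_tex")
    cond = os.path.join(example_dir, "render_cond")
    return {
        "rgb": os.path.join(tex, f"{v}.png"),
        "albedo": os.path.join(tex, f"{v}_albedo.png"),
        "metallic_roughness": os.path.join(tex, f"{v}_mr.png"),
        "normal": os.path.join(tex, f"{v}_normal.png"),
        "position": os.path.join(tex, f"{v}_pos.png"),
        "lighting": [os.path.join(cond, f"{v}_light_{k}.png") for k in ("AL", "ENVMAP", "PL")],
    }

print(expected_view_files("train_examples/001", 0))
```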
### Launch Training
```bash
python3 train.py --base 'cfgs/hunyuan-paint-pbr.yaml' --name overfit --logdir logs/
```
## BibTeX
If you find Hunyuan3D-Paint 2.1 helpful, please cite our papers:
```bibtex
@article{feng2025romantex,
title={RomanTex: Decoupling 3D-aware Rotary Positional Embedded Multi-Attention Network for Texture Synthesis},
author={Feng, Yifei and Yang, Mingxin and Yang, Shuhui and Zhang, Sheng and Yu, Jiaao and Zhao, Zibo and Liu, Yuhong and Jiang, Jie and Guo, Chunchao},
journal={arXiv preprint arXiv:2503.19011},
year={2025}
}
@article{he2025materialmvp,
title={MaterialMVP: Illumination-Invariant Material Generation via Multi-view PBR Diffusion},
author={He, Zebin and Yang, Mingxin and Yang, Shuhui and Tang, Yixuan and Wang, Tao and Zhang, Kaihao and Chen, Guanying and Liu, Yuhong and Jiang, Jie and Guo, Chunchao and Luo, Wenhan},
journal={arXiv preprint arXiv:2503.10289},
year={2025}
}
```

Binary file not shown (added image, 863 KiB)

Binary file not shown

Binary file not shown (added image, 648 KiB)

Binary file not shown


@@ -0,0 +1,52 @@
model:
base_learning_rate: 5.0e-05
target: hunyuanpaintpbr.model.HunyuanPaint
params:
num_view: 6
view_size: 512
drop_cond_prob: 0.1
noise_in_channels: 12
stable_diffusion_config:
pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1
custom_pipeline: ./hunyuanpaintpbr
data:
target: src.data.objaverse_hunyuan.DataModuleFromConfig
params:
batch_size: 1
num_workers: 4
train:
-
target: src.data.dataloader.objaverse_loader_forTexturePBR.TextureDataset
params:
num_view: 6
json_path: train_examples/examples.json
validation:
-
target: src.data.dataloader.objaverse_loader_forTexturePBR.TextureDataset
params:
num_view: 6
json_path: train_examples/examples.json
lightning:
modelcheckpoint:
params:
every_n_train_steps: 10000
save_top_k: -1
save_last: true
callbacks: {}
trainer:
benchmark: true
max_epochs: -1
gradient_clip_val: 1.0
val_check_interval: 1000
num_sanity_val_steps: 0
accumulate_grad_batches: 1
    check_val_every_n_epoch: null  # if this is not set, validation does not run
init_control_from:
resume_from:
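The `target`/`params` pairs in this config follow the common instantiate-from-config pattern; a hedged sketch of how such a config is typically resolved is below. The actual loader used by `train.py` is not part of this excerpt, so the helper shown here is an assumption.

```python
import importlib
import yaml

def instantiate_from_config(cfg):
    # Hypothetical loader: import the dotted "target" path and call it with "params".
    module_name, cls_name = cfg["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_name), cls_name)
    return cls(**(cfg.get("params") or {}))

with open("cfgs/hunyuan-paint-pbr.yaml") as f:
    config = yaml.safe_load(f)
model = instantiate_from_config(config["model"])  # -> hunyuanpaintpbr.model.HunyuanPaint
```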

hy3dpaint/convert_utils.py Normal file (140 lines)

@@ -0,0 +1,140 @@
import trimesh
import pygltflib
import numpy as np
from PIL import Image
import base64
import io
def combine_metallic_roughness(metallic_path, roughness_path, output_path):
"""
将metallic和roughness贴图合并为一张贴图
GLB格式要求metallic在B通道roughness在G通道
"""
# 加载贴图
metallic_img = Image.open(metallic_path).convert("L") # 转为灰度
roughness_img = Image.open(roughness_path).convert("L") # 转为灰度
# 确保尺寸一致
if metallic_img.size != roughness_img.size:
roughness_img = roughness_img.resize(metallic_img.size)
    # Create an RGB image
width, height = metallic_img.size
combined = Image.new("RGB", (width, height))
    # Convert to numpy arrays for easier manipulation
metallic_array = np.array(metallic_img)
roughness_array = np.array(roughness_img)
    # Build the combined array: (R, G, B) = (AO, Roughness, Metallic)
    combined_array = np.zeros((height, width, 3), dtype=np.uint8)
    combined_array[:, :, 0] = 255  # R channel: AO (white if no AO map is provided)
    combined_array[:, :, 1] = roughness_array  # G channel: roughness
    combined_array[:, :, 2] = metallic_array  # B channel: metallic
    # Convert back to a PIL image and save
combined = Image.fromarray(combined_array)
combined.save(output_path)
return output_path
def create_glb_with_pbr_materials(obj_path, textures_dict, output_path):
"""
使用pygltflib创建包含完整PBR材质的GLB文件
textures_dict = {
'albedo': 'path/to/albedo.png',
'metallic': 'path/to/metallic.png',
'roughness': 'path/to/roughness.png',
'normal': 'path/to/normal.png', # 可选
'ao': 'path/to/ao.png' # 可选
}
"""
    # 1. Load the OBJ file
    mesh = trimesh.load(obj_path)
    # 2. Export to a temporary GLB first
    temp_glb = "temp.glb"
    mesh.export(temp_glb)
    # 3. Load the GLB for material editing
    gltf = pygltflib.GLTF2().load(temp_glb)
    # 4. Prepare texture data
    def image_to_data_uri(image_path):
        """Convert an image file to a data URI."""
with open(image_path, "rb") as f:
image_data = f.read()
encoded = base64.b64encode(image_data).decode()
return f"data:image/png;base64,{encoded}"
    # 5. Combine the metallic and roughness maps
if "metallic" in textures_dict and "roughness" in textures_dict:
mr_combined_path = "mr_combined.png"
combine_metallic_roughness(textures_dict["metallic"], textures_dict["roughness"], mr_combined_path)
textures_dict["metallicRoughness"] = mr_combined_path
    # 6. Add images to the glTF
images = []
textures = []
texture_mapping = {
"albedo": "baseColorTexture",
"metallicRoughness": "metallicRoughnessTexture",
"normal": "normalTexture",
"ao": "occlusionTexture",
}
for tex_type, tex_path in textures_dict.items():
if tex_type in texture_mapping and tex_path:
            # Add the image
            image = pygltflib.Image(uri=image_to_data_uri(tex_path))
            images.append(image)
            # Add the texture
            texture = pygltflib.Texture(source=len(images) - 1)
            textures.append(texture)
    # 7. Create the PBR material
pbr_metallic_roughness = pygltflib.PbrMetallicRoughness(
baseColorFactor=[1.0, 1.0, 1.0, 1.0], metallicFactor=1.0, roughnessFactor=1.0
)
    # Assign texture indices
texture_index = 0
if "albedo" in textures_dict:
pbr_metallic_roughness.baseColorTexture = pygltflib.TextureInfo(index=texture_index)
texture_index += 1
if "metallicRoughness" in textures_dict:
pbr_metallic_roughness.metallicRoughnessTexture = pygltflib.TextureInfo(index=texture_index)
texture_index += 1
    # Create the material
    material = pygltflib.Material(name="PBR_Material", pbrMetallicRoughness=pbr_metallic_roughness)
    # Add the normal map
if "normal" in textures_dict:
material.normalTexture = pygltflib.NormalTextureInfo(index=texture_index)
texture_index += 1
    # Add the AO map
if "ao" in textures_dict:
material.occlusionTexture = pygltflib.OcclusionTextureInfo(index=texture_index)
    # 8. Update the glTF
gltf.images = images
gltf.textures = textures
gltf.materials = [material]
    # Make sure the mesh uses the material
if gltf.meshes:
for primitive in gltf.meshes[0].primitives:
primitive.material = 0
    # 9. Save the final GLB
    gltf.save(output_path)
    print(f"PBR GLB file saved: {output_path}")


@@ -0,0 +1,4 @@
"""
from .render import rasterize, interpolate
"""
from .render import *


@@ -0,0 +1,32 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import custom_rasterizer_kernel
import torch
def rasterize(pos, tri, resolution, clamp_depth=torch.zeros(0), use_depth_prior=0):
assert pos.device == tri.device
findices, barycentric = custom_rasterizer_kernel.rasterize_image(
pos[0], tri, clamp_depth, resolution[1], resolution[0], 1e-6, use_depth_prior
)
return findices, barycentric
def interpolate(col, findices, barycentric, tri):
f = findices - 1 + (findices == 0)
vcol = col[0, tri.long()[f.long()]]
result = barycentric.view(*barycentric.shape, 1) * vcol
result = torch.sum(result, axis=-2)
return result.view(1, *result.shape)
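A hedged usage sketch for the wrappers above. It assumes the `custom_rasterizer_kernel` extension is built and that tensors live on the device the kernel expects (most likely CUDA); the tensor shapes are inferred from how the wrappers index their arguments and are placeholders.

```python
import torch

pos = torch.rand(1, 300, 4, device="cuda")  # (1, N, 4) clip-space vertex positions
tri = torch.randint(0, 300, (100, 3), device="cuda", dtype=torch.int32)  # (F, 3) faces
col = torch.rand(1, 300, 3, device="cuda")  # (1, N, C) per-vertex attributes

findices, barycentric = rasterize(pos, tri, (512, 512))
image = interpolate(col, findices, barycentric, tri)  # interpolated per-pixel attributes
```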


@@ -0,0 +1,574 @@
#include "rasterizer.h"
#include <fstream>
inline int pos2key(float* p, int resolution) {
int x = (p[0] * 0.5 + 0.5) * resolution;
int y = (p[1] * 0.5 + 0.5) * resolution;
int z = (p[2] * 0.5 + 0.5) * resolution;
return (x * resolution + y) * resolution + z;
}
inline void key2pos(int key, int resolution, float* p) {
int x = key / resolution / resolution;
int y = key / resolution % resolution;
int z = key % resolution;
p[0] = ((x + 0.5) / resolution - 0.5) * 2;
p[1] = ((y + 0.5) / resolution - 0.5) * 2;
p[2] = ((z + 0.5) / resolution - 0.5) * 2;
}
inline void key2cornerpos(int key, int resolution, float* p) {
int x = key / resolution / resolution;
int y = key / resolution % resolution;
int z = key % resolution;
p[0] = ((x + 0.75) / resolution - 0.5) * 2;
p[1] = ((y + 0.25) / resolution - 0.5) * 2;
p[2] = ((z + 0.75) / resolution - 0.5) * 2;
}
inline float* pos_ptr(int l, int i, int j, torch::Tensor t) {
float* pdata = t.data_ptr<float>();
int height = t.size(1);
int width = t.size(2);
return &pdata[((l * height + i) * width + j) * 4];
}
struct Grid
{
std::vector<int> seq2oddcorner;
std::vector<int> seq2evencorner;
std::vector<int> seq2grid;
std::vector<int> seq2normal;
std::vector<int> seq2neighbor;
std::unordered_map<int, int> grid2seq;
std::vector<int> downsample_seq;
int num_origin_seq;
int resolution;
int stride;
};
inline void pos_from_seq(Grid& grid, int seq, float* p) {
auto k = grid.seq2grid[seq];
key2pos(k, grid.resolution, p);
}
inline int fetch_seq(Grid& grid, int l, int i, int j, torch::Tensor pdata) {
float* p = pos_ptr(l, i, j, pdata);
if (p[3] == 0)
return -1;
auto key = pos2key(p, grid.resolution);
int seq = grid.grid2seq[key];
return seq;
}
inline int fetch_last_seq(Grid& grid, int i, int j, torch::Tensor pdata) {
int num_layers = pdata.size(0);
int l = 0;
int idx = fetch_seq(grid, l, i, j, pdata);
while (l < num_layers - 1) {
l += 1;
int new_idx = fetch_seq(grid, l, i, j, pdata);
if (new_idx == -1)
break;
idx = new_idx;
}
return idx;
}
inline int fetch_nearest_seq(Grid& grid, int i, int j, int dim, float d, torch::Tensor pdata) {
float p[3];
float max_dist = 1e10;
int best_idx = -1;
int num_layers = pdata.size(0);
for (int l = 0; l < num_layers; ++l) {
int idx = fetch_seq(grid, l, i, j, pdata);
if (idx == -1)
break;
pos_from_seq(grid, idx, p);
float dist = std::abs(d - p[(dim + 2) % 3]);
if (dist < max_dist) {
max_dist = dist;
best_idx = idx;
}
}
return best_idx;
}
inline int fetch_nearest_seq_layer(Grid& grid, int i, int j, int dim, float d, torch::Tensor pdata) {
float p[3];
float max_dist = 1e10;
int best_layer = -1;
int num_layers = pdata.size(0);
for (int l = 0; l < num_layers; ++l) {
int idx = fetch_seq(grid, l, i, j, pdata);
if (idx == -1)
break;
pos_from_seq(grid, idx, p);
float dist = std::abs(d - p[(dim + 2) % 3]);
if (dist < max_dist) {
max_dist = dist;
best_layer = l;
}
}
return best_layer;
}
void FetchNeighbor(Grid& grid, int seq, float* pos, int dim, int boundary_info, std::vector<torch::Tensor>& view_layer_positions,
int* output_indices)
{
auto t = view_layer_positions[dim];
int height = t.size(1);
int width = t.size(2);
int top = 0;
int ci = 0;
int cj = 0;
if (dim == 0) {
ci = (pos[1]/2+0.5)*height;
cj = (pos[0]/2+0.5)*width;
}
else if (dim == 1) {
ci = (pos[1]/2+0.5)*height;
cj = (pos[2]/2+0.5)*width;
}
else {
ci = (-pos[2]/2+0.5)*height;
cj = (pos[0]/2+0.5)*width;
}
int stride = grid.stride;
for (int ni = ci + stride; ni >= ci - stride; ni -= stride) {
for (int nj = cj - stride; nj <= cj + stride; nj += stride) {
int idx = -1;
if (ni == ci && nj == cj)
idx = seq;
else if (!(ni < 0 || ni >= height || nj < 0 || nj >= width)) {
if (boundary_info == -1)
idx = fetch_seq(grid, 0, ni, nj, t);
else if (boundary_info == 1)
idx = fetch_last_seq(grid, ni, nj, t);
else
idx = fetch_nearest_seq(grid, ni, nj, dim, pos[(dim + 2) % 3], t);
}
output_indices[top] = idx;
top += 1;
}
}
}
void DownsampleGrid(Grid& src, Grid& tar)
{
src.downsample_seq.resize(src.seq2grid.size(), -1);
tar.resolution = src.resolution / 2;
tar.stride = src.stride * 2;
float pos[3];
std::vector<int> seq2normal_count;
for (int i = 0; i < src.seq2grid.size(); ++i) {
key2pos(src.seq2grid[i], src.resolution, pos);
int k = pos2key(pos, tar.resolution);
int s = seq2normal_count.size();
if (!tar.grid2seq.count(k)) {
tar.grid2seq[k] = tar.seq2grid.size();
tar.seq2grid.emplace_back(k);
seq2normal_count.emplace_back(0);
seq2normal_count.emplace_back(0);
seq2normal_count.emplace_back(0);
//tar.seq2normal.emplace_back(src.seq2normal[i]);
} else {
s = tar.grid2seq[k] * 3;
}
seq2normal_count[s + src.seq2normal[i]] += 1;
src.downsample_seq[i] = tar.grid2seq[k];
}
tar.seq2normal.resize(seq2normal_count.size() / 3);
for (int i = 0; i < seq2normal_count.size(); i += 3) {
int t = 0;
for (int j = 1; j < 3; ++j) {
if (seq2normal_count[i + j] > seq2normal_count[i + t])
t = j;
}
tar.seq2normal[i / 3] = t;
}
}
void NeighborGrid(Grid& grid, std::vector<torch::Tensor> view_layer_positions, int v)
{
grid.seq2evencorner.resize(grid.seq2grid.size(), 0);
grid.seq2oddcorner.resize(grid.seq2grid.size(), 0);
std::unordered_set<int> visited_seq;
for (int vd = 0; vd < 3; ++vd) {
auto t = view_layer_positions[vd];
auto t0 = view_layer_positions[v];
int height = t.size(1);
int width = t.size(2);
int num_layers = t.size(0);
int num_view_layers = t0.size(0);
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
for (int l = 0; l < num_layers; ++l) {
int seq = fetch_seq(grid, l, i, j, t);
if (seq == -1)
break;
int dim = grid.seq2normal[seq];
if (dim != v)
continue;
float pos[3];
pos_from_seq(grid, seq, pos);
int ci = 0;
int cj = 0;
if (dim == 0) {
ci = (pos[1]/2+0.5)*height;
cj = (pos[0]/2+0.5)*width;
}
else if (dim == 1) {
ci = (pos[1]/2+0.5)*height;
cj = (pos[2]/2+0.5)*width;
}
else {
ci = (-pos[2]/2+0.5)*height;
cj = (pos[0]/2+0.5)*width;
}
if ((ci % (grid.stride * 2) < grid.stride) && (cj % (grid.stride * 2) >= grid.stride))
grid.seq2evencorner[seq] = 1;
if ((ci % (grid.stride * 2) >= grid.stride) && (cj % (grid.stride * 2) < grid.stride))
grid.seq2oddcorner[seq] = 1;
bool is_boundary = false;
if (vd == v) {
if (l == 0 || l == num_layers - 1)
is_boundary = true;
else {
int seq_new = fetch_seq(grid, l + 1, i, j, t);
if (seq_new == -1)
is_boundary = true;
}
}
int boundary_info = 0;
if (is_boundary && (l == 0))
boundary_info = -1;
else if (is_boundary)
boundary_info = 1;
if (visited_seq.count(seq))
continue;
visited_seq.insert(seq);
FetchNeighbor(grid, seq, pos, dim, boundary_info, view_layer_positions, &grid.seq2neighbor[seq * 9]);
}
}
}
}
}
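// Ensure every coarse cell in `tar` has both an even-corner and an odd-corner
// representative in the finer grid `src`: missing representatives are either flagged on an
// existing fine cell or newly created at the coarse cell's corner position, complete with
// neighbor indices and a downsample mapping back to the coarse cell.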
void PadGrid(Grid& src, Grid& tar, std::vector<torch::Tensor>& view_layer_positions) {
auto& downsample_seq = src.downsample_seq;
auto& seq2evencorner = src.seq2evencorner;
auto& seq2oddcorner = src.seq2oddcorner;
int indices[9];
std::vector<int> mapped_even_corners(tar.seq2grid.size(), 0);
std::vector<int> mapped_odd_corners(tar.seq2grid.size(), 0);
for (int i = 0; i < downsample_seq.size(); ++i) {
if (seq2evencorner[i] > 0) {
mapped_even_corners[downsample_seq[i]] = 1;
}
if (seq2oddcorner[i] > 0) {
mapped_odd_corners[downsample_seq[i]] = 1;
}
}
auto& tar_seq2normal = tar.seq2normal;
auto& tar_seq2grid = tar.seq2grid;
for (int i = 0; i < tar_seq2grid.size(); ++i) {
if (mapped_even_corners[i] == 1 && mapped_odd_corners[i] == 1)
continue;
auto k = tar_seq2grid[i];
float p[3];
key2cornerpos(k, tar.resolution, p);
int src_key = pos2key(p, src.resolution);
if (!src.grid2seq.count(src_key)) {
int seq = src.seq2grid.size();
src.grid2seq[src_key] = seq;
src.seq2evencorner.emplace_back((mapped_even_corners[i] == 0));
src.seq2oddcorner.emplace_back((mapped_odd_corners[i] == 0));
src.seq2grid.emplace_back(src_key);
src.seq2normal.emplace_back(tar_seq2normal[i]);
FetchNeighbor(src, seq, p, tar_seq2normal[i], 0, view_layer_positions, indices);
for (int j = 0; j < 9; ++j) {
src.seq2neighbor.emplace_back(indices[j]);
}
src.downsample_seq.emplace_back(i);
} else {
int seq = src.grid2seq[src_key];
if (mapped_even_corners[i] == 0)
src.seq2evencorner[seq] = 1;
if (mapped_odd_corners[i] == 0)
src.seq2oddcorner[seq] = 1;
}
}
}
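// Build a multi-resolution grid hierarchy from three per-view layered position/normal maps.
// Returns { texture_positions: [point xyz (N,3), validity mask (N) flagging original vs. padded points],
//           grid_neighbors:     per level, (Ni,9) neighbor indices,
//           grid_downsamples:   per level except the coarsest, (Ni) fine-to-coarse mapping,
//           grid_evencorners / grid_oddcorners: per level, (Ni) corner flags }.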
std::vector<std::vector<torch::Tensor>> build_hierarchy(std::vector<torch::Tensor> view_layer_positions,
std::vector<torch::Tensor> view_layer_normals, int num_level, int resolution)
{
if (view_layer_positions.size() != 3 || num_level < 1) {
printf("Alert! We require 3 layers and at least 1 level! (%d %d)\n", view_layer_positions.size(), num_level);
return {{},{},{},{}};
}
std::vector<Grid> grids;
grids.resize(num_level);
std::vector<float> seq2pos;
auto& seq2grid = grids[0].seq2grid;
auto& seq2normal = grids[0].seq2normal;
auto& grid2seq = grids[0].grid2seq;
grids[0].resolution = resolution;
grids[0].stride = 1;
auto int64_options = torch::TensorOptions().dtype(torch::kInt64).requires_grad(false);
auto float_options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false);
for (int v = 0; v < 3; ++v) {
int num_layers = view_layer_positions[v].size(0);
int height = view_layer_positions[v].size(1);
int width = view_layer_positions[v].size(2);
float* data = view_layer_positions[v].data_ptr<float>();
float* data_normal = view_layer_normals[v].data_ptr<float>();
for (int l = 0; l < num_layers; ++l) {
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
float* p = &data[(i * width + j) * 4];
float* n = &data_normal[(i * width + j) * 3];
if (p[3] == 0)
continue;
auto k = pos2key(p, resolution);
if (!grid2seq.count(k)) {
int dim = 0;
for (int d = 0; d < 3; ++d) {
if (std::abs(n[d]) > std::abs(n[dim]))
dim = d;
}
dim = (dim + 1) % 3;
grid2seq[k] = seq2grid.size();
seq2grid.emplace_back(k);
seq2pos.push_back(p[0]);
seq2pos.push_back(p[1]);
seq2pos.push_back(p[2]);
seq2normal.emplace_back(dim);
}
}
}
data += (height * width * 4);
data_normal += (height * width * 3);
}
}
for (int i = 0; i < num_level - 1; ++i) {
DownsampleGrid(grids[i], grids[i + 1]);
}
for (int l = 0; l < num_level; ++l) {
grids[l].seq2neighbor.resize(grids[l].seq2grid.size() * 9, -1);
grids[l].num_origin_seq = grids[l].seq2grid.size();
for (int d = 0; d < 3; ++d) {
NeighborGrid(grids[l], view_layer_positions, d);
}
}
for (int i = num_level - 2; i >= 0; --i) {
PadGrid(grids[i], grids[i + 1], view_layer_positions);
}
for (int i = grids[0].num_origin_seq; i < grids[0].seq2grid.size(); ++i) {
int k = grids[0].seq2grid[i];
float p[3];
key2pos(k, grids[0].resolution, p);
seq2pos.push_back(p[0]);
seq2pos.push_back(p[1]);
seq2pos.push_back(p[2]);
}
std::vector<torch::Tensor> texture_positions(2);
std::vector<torch::Tensor> grid_neighbors(grids.size());
std::vector<torch::Tensor> grid_downsamples(grids.size() - 1);
std::vector<torch::Tensor> grid_evencorners(grids.size());
std::vector<torch::Tensor> grid_oddcorners(grids.size());
texture_positions[0] = torch::zeros({seq2pos.size() / 3, 3}, float_options);
texture_positions[1] = torch::zeros({seq2pos.size() / 3}, float_options);
float* positions_out_ptr = texture_positions[0].data_ptr<float>();
memcpy(positions_out_ptr, seq2pos.data(), sizeof(float) * seq2pos.size());
positions_out_ptr = texture_positions[1].data_ptr<float>();
for (int i = 0; i < grids[0].seq2grid.size(); ++i) {
positions_out_ptr[i] = (i < grids[0].num_origin_seq);
}
for (int i = 0; i < grids.size(); ++i) {
grid_neighbors[i] = torch::zeros({grids[i].seq2grid.size(), 9}, int64_options);
long* nptr = grid_neighbors[i].data_ptr<long>();
for (int j = 0; j < grids[i].seq2neighbor.size(); ++j) {
nptr[j] = grids[i].seq2neighbor[j];
}
grid_evencorners[i] = torch::zeros({grids[i].seq2evencorner.size()}, int64_options);
grid_oddcorners[i] = torch::zeros({grids[i].seq2oddcorner.size()}, int64_options);
long* dptr = grid_evencorners[i].data_ptr<long>();
for (int j = 0; j < grids[i].seq2evencorner.size(); ++j) {
dptr[j] = grids[i].seq2evencorner[j];
}
dptr = grid_oddcorners[i].data_ptr<long>();
for (int j = 0; j < grids[i].seq2oddcorner.size(); ++j) {
dptr[j] = grids[i].seq2oddcorner[j];
}
if (i + 1 < grids.size()) {
grid_downsamples[i] = torch::zeros({grids[i].downsample_seq.size()}, int64_options);
long* dptr = grid_downsamples[i].data_ptr<long>();
for (int j = 0; j < grids[i].downsample_seq.size(); ++j) {
dptr[j] = grids[i].downsample_seq[j];
}
}
}
return {texture_positions, grid_neighbors, grid_downsamples, grid_evencorners, grid_oddcorners};
}
std::vector<std::vector<torch::Tensor>> build_hierarchy_with_feat(
std::vector<torch::Tensor> view_layer_positions,
std::vector<torch::Tensor> view_layer_normals,
std::vector<torch::Tensor> view_layer_feats,
int num_level, int resolution)
{
if (view_layer_positions.size() != 3 || num_level < 1) {
printf("Alert! We require 3 layers and at least 1 level! (%d %d)\n", view_layer_positions.size(), num_level);
return {{},{},{},{}};
}
std::vector<Grid> grids;
grids.resize(num_level);
std::vector<float> seq2pos;
std::vector<float> seq2feat;
auto& seq2grid = grids[0].seq2grid;
auto& seq2normal = grids[0].seq2normal;
auto& grid2seq = grids[0].grid2seq;
grids[0].resolution = resolution;
grids[0].stride = 1;
auto int64_options = torch::TensorOptions().dtype(torch::kInt64).requires_grad(false);
auto float_options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false);
int feat_channel = 3;
for (int v = 0; v < 3; ++v) {
int num_layers = view_layer_positions[v].size(0);
int height = view_layer_positions[v].size(1);
int width = view_layer_positions[v].size(2);
float* data = view_layer_positions[v].data_ptr<float>();
float* data_normal = view_layer_normals[v].data_ptr<float>();
float* data_feat = view_layer_feats[v].data_ptr<float>();
feat_channel = view_layer_feats[v].size(3);
for (int l = 0; l < num_layers; ++l) {
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
float* p = &data[(i * width + j) * 4];
float* n = &data_normal[(i * width + j) * 3];
float* f = &data_feat[(i * width + j) * feat_channel];
if (p[3] == 0)
continue;
auto k = pos2key(p, resolution);
if (!grid2seq.count(k)) {
int dim = 0;
for (int d = 0; d < 3; ++d) {
if (std::abs(n[d]) > std::abs(n[dim]))
dim = d;
}
dim = (dim + 1) % 3;
grid2seq[k] = seq2grid.size();
seq2grid.emplace_back(k);
seq2pos.push_back(p[0]);
seq2pos.push_back(p[1]);
seq2pos.push_back(p[2]);
for (int c = 0; c < feat_channel; ++c) {
seq2feat.emplace_back(f[c]);
}
seq2normal.emplace_back(dim);
}
}
}
data += (height * width * 4);
data_normal += (height * width * 3);
data_feat += (height * width * feat_channel);
}
}
for (int i = 0; i < num_level - 1; ++i) {
DownsampleGrid(grids[i], grids[i + 1]);
}
for (int l = 0; l < num_level; ++l) {
grids[l].seq2neighbor.resize(grids[l].seq2grid.size() * 9, -1);
grids[l].num_origin_seq = grids[l].seq2grid.size();
for (int d = 0; d < 3; ++d) {
NeighborGrid(grids[l], view_layer_positions, d);
}
}
for (int i = num_level - 2; i >= 0; --i) {
PadGrid(grids[i], grids[i + 1], view_layer_positions);
}
for (int i = grids[0].num_origin_seq; i < grids[0].seq2grid.size(); ++i) {
int k = grids[0].seq2grid[i];
float p[3];
key2pos(k, grids[0].resolution, p);
seq2pos.push_back(p[0]);
seq2pos.push_back(p[1]);
seq2pos.push_back(p[2]);
for (int c = 0; c < feat_channel; ++c) {
seq2feat.emplace_back(0.5);
}
}
std::vector<torch::Tensor> texture_positions(2);
std::vector<torch::Tensor> texture_feats(1);
std::vector<torch::Tensor> grid_neighbors(grids.size());
std::vector<torch::Tensor> grid_downsamples(grids.size() - 1);
std::vector<torch::Tensor> grid_evencorners(grids.size());
std::vector<torch::Tensor> grid_oddcorners(grids.size());
texture_positions[0] = torch::zeros({seq2pos.size() / 3, 3}, float_options);
texture_positions[1] = torch::zeros({seq2pos.size() / 3}, float_options);
texture_feats[0] = torch::zeros({seq2feat.size() / feat_channel, feat_channel}, float_options);
float* positions_out_ptr = texture_positions[0].data_ptr<float>();
memcpy(positions_out_ptr, seq2pos.data(), sizeof(float) * seq2pos.size());
positions_out_ptr = texture_positions[1].data_ptr<float>();
for (int i = 0; i < grids[0].seq2grid.size(); ++i) {
positions_out_ptr[i] = (i < grids[0].num_origin_seq);
}
float* feats_out_ptr = texture_feats[0].data_ptr<float>();
memcpy(feats_out_ptr, seq2feat.data(), sizeof(float) * seq2feat.size());
for (int i = 0; i < grids.size(); ++i) {
grid_neighbors[i] = torch::zeros({grids[i].seq2grid.size(), 9}, int64_options);
long* nptr = grid_neighbors[i].data_ptr<long>();
for (int j = 0; j < grids[i].seq2neighbor.size(); ++j) {
nptr[j] = grids[i].seq2neighbor[j];
}
grid_evencorners[i] = torch::zeros({grids[i].seq2evencorner.size()}, int64_options);
grid_oddcorners[i] = torch::zeros({grids[i].seq2oddcorner.size()}, int64_options);
long* dptr = grid_evencorners[i].data_ptr<long>();
for (int j = 0; j < grids[i].seq2evencorner.size(); ++j) {
dptr[j] = grids[i].seq2evencorner[j];
}
dptr = grid_oddcorners[i].data_ptr<long>();
for (int j = 0; j < grids[i].seq2oddcorner.size(); ++j) {
dptr[j] = grids[i].seq2oddcorner[j];
}
if (i + 1 < grids.size()) {
grid_downsamples[i] = torch::zeros({grids[i].downsample_seq.size()}, int64_options);
long* dptr = grid_downsamples[i].data_ptr<long>();
for (int j = 0; j < grids[i].downsample_seq.size(); ++j) {
dptr[j] = grids[i].downsample_seq[j];
}
}
}
return {texture_positions, texture_feats, grid_neighbors, grid_downsamples, grid_evencorners, grid_oddcorners};
}

View File

@@ -0,0 +1,139 @@
#include "rasterizer.h"
void rasterizeTriangleCPU(int idx, float* vt0, float* vt1, float* vt2, int width, int height, INT64* zbuffer, float* d, float occlusion_truncation) {
float x_min = std::min(vt0[0], std::min(vt1[0],vt2[0]));
float x_max = std::max(vt0[0], std::max(vt1[0],vt2[0]));
float y_min = std::min(vt0[1], std::min(vt1[1],vt2[1]));
float y_max = std::max(vt0[1], std::max(vt1[1],vt2[1]));
for (int px = x_min; px < x_max + 1; ++px) {
if (px < 0 || px >= width)
continue;
for (int py = y_min; py < y_max + 1; ++py) {
if (py < 0 || py >= height)
continue;
float vt[2] = {px + 0.5f, py + 0.5f};
float baryCentricCoordinate[3];
calculateBarycentricCoordinate(vt0, vt1, vt2, vt, baryCentricCoordinate);
if (isBarycentricCoordInBounds(baryCentricCoordinate)) {
int pixel = py * width + px;
if (zbuffer == 0) {
zbuffer[pixel] = (INT64)(idx + 1);
continue;
}
float depth = baryCentricCoordinate[0] * vt0[2] + baryCentricCoordinate[1] * vt1[2] + baryCentricCoordinate[2] * vt2[2];
float depth_thres = 0;
if (d) {
depth_thres = d[pixel] * 0.49999f + 0.5f + occlusion_truncation;
}
int z_quantize = depth * (2<<17);
INT64 token = (INT64)z_quantize * MAXINT + (INT64)(idx + 1);
if (depth < depth_thres)
continue;
zbuffer[pixel] = std::min(zbuffer[pixel], token);
}
}
}
}
void barycentricFromImgcoordCPU(float* V, int* F, int* findices, INT64* zbuffer, int width, int height, int num_vertices, int num_faces,
float* barycentric_map, int pix)
{
INT64 f = zbuffer[pix] % MAXINT;
if (f == (MAXINT-1)) {
findices[pix] = 0;
barycentric_map[pix * 3] = 0;
barycentric_map[pix * 3 + 1] = 0;
barycentric_map[pix * 3 + 2] = 0;
return;
}
findices[pix] = f;
f -= 1;
float barycentric[3] = {0, 0, 0};
if (f >= 0) {
float vt[2] = {float(pix % width) + 0.5f, float(pix / width) + 0.5f};
float* vt0_ptr = V + (F[f * 3] * 4);
float* vt1_ptr = V + (F[f * 3 + 1] * 4);
float* vt2_ptr = V + (F[f * 3 + 2] * 4);
float vt0[2] = {(vt0_ptr[0] / vt0_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt0_ptr[1] / vt0_ptr[3]) * (height - 1) + 0.5f};
float vt1[2] = {(vt1_ptr[0] / vt1_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt1_ptr[1] / vt1_ptr[3]) * (height - 1) + 0.5f};
float vt2[2] = {(vt2_ptr[0] / vt2_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt2_ptr[1] / vt2_ptr[3]) * (height - 1) + 0.5f};
calculateBarycentricCoordinate(vt0, vt1, vt2, vt, barycentric);
barycentric[0] = barycentric[0] / vt0_ptr[3];
barycentric[1] = barycentric[1] / vt1_ptr[3];
barycentric[2] = barycentric[2] / vt2_ptr[3];
float w = 1.0f / (barycentric[0] + barycentric[1] + barycentric[2]);
barycentric[0] *= w;
barycentric[1] *= w;
barycentric[2] *= w;
}
barycentric_map[pix * 3] = barycentric[0];
barycentric_map[pix * 3 + 1] = barycentric[1];
barycentric_map[pix * 3 + 2] = barycentric[2];
}
void rasterizeImagecoordsKernelCPU(float* V, int* F, float* d, INT64* zbuffer, float occlusion_trunc, int width, int height, int num_vertices, int num_faces, int f)
{
float* vt0_ptr = V + (F[f * 3] * 4);
float* vt1_ptr = V + (F[f * 3 + 1] * 4);
float* vt2_ptr = V + (F[f * 3 + 2] * 4);
float vt0[3] = {(vt0_ptr[0] / vt0_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt0_ptr[1] / vt0_ptr[3]) * (height - 1) + 0.5f, vt0_ptr[2] / vt0_ptr[3] * 0.49999f + 0.5f};
float vt1[3] = {(vt1_ptr[0] / vt1_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt1_ptr[1] / vt1_ptr[3]) * (height - 1) + 0.5f, vt1_ptr[2] / vt1_ptr[3] * 0.49999f + 0.5f};
float vt2[3] = {(vt2_ptr[0] / vt2_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt2_ptr[1] / vt2_ptr[3]) * (height - 1) + 0.5f, vt2_ptr[2] / vt2_ptr[3] * 0.49999f + 0.5f};
rasterizeTriangleCPU(f, vt0, vt1, vt2, width, height, zbuffer, d, occlusion_trunc);
}
std::vector<torch::Tensor> rasterize_image_cpu(torch::Tensor V, torch::Tensor F, torch::Tensor D,
int width, int height, float occlusion_truncation, int use_depth_prior)
{
int num_faces = F.size(0);
int num_vertices = V.size(0);
auto options = torch::TensorOptions().dtype(torch::kInt32).requires_grad(false);
auto INT64_options = torch::TensorOptions().dtype(torch::kInt64).requires_grad(false);
auto findices = torch::zeros({height, width}, options);
INT64 maxint = (INT64)MAXINT * (INT64)MAXINT + (MAXINT - 1);
auto z_min = torch::ones({height, width}, INT64_options) * (long)maxint;
if (!use_depth_prior) {
for (int i = 0; i < num_faces; ++i) {
rasterizeImagecoordsKernelCPU(V.data_ptr<float>(), F.data_ptr<int>(), 0,
(INT64*)z_min.data_ptr<long>(), occlusion_truncation, width, height, num_vertices, num_faces, i);
}
} else {
for (int i = 0; i < num_faces; ++i)
rasterizeImagecoordsKernelCPU(V.data_ptr<float>(), F.data_ptr<int>(), D.data_ptr<float>(),
(INT64*)z_min.data_ptr<long>(), occlusion_truncation, width, height, num_vertices, num_faces, i);
}
auto float_options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false);
auto barycentric = torch::zeros({height, width, 3}, float_options);
for (int i = 0; i < width * height; ++i)
barycentricFromImgcoordCPU(V.data_ptr<float>(), F.data_ptr<int>(),
findices.data_ptr<int>(), (INT64*)z_min.data_ptr<long>(), width, height, num_vertices, num_faces, barycentric.data_ptr<float>(), i);
return {findices, barycentric};
}
std::vector<torch::Tensor> rasterize_image(torch::Tensor V, torch::Tensor F, torch::Tensor D,
int width, int height, float occlusion_truncation, int use_depth_prior)
{
int device_id = V.get_device();
if (device_id == -1)
return rasterize_image_cpu(V, F, D, width, height, occlusion_truncation, use_depth_prior);
else
return rasterize_image_gpu(V, F, D, width, height, occlusion_truncation, use_depth_prior);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("rasterize_image", &rasterize_image, "Custom image rasterization");
m.def("build_hierarchy", &build_hierarchy, "Custom image rasterization");
m.def("build_hierarchy_with_feat", &build_hierarchy_with_feat, "Custom image rasterization");
}
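// A minimal, illustrative sketch (not part of the original sources) of calling the
// compiled extension from Python; tensor shapes are inferred from the code above and the
// exact conventions used by the surrounding pipeline may differ.
//
//   import torch
//   import custom_rasterizer_kernel as crk
//
//   V = torch.randn(1000, 4)                                   # clip-space vertices (x, y, z, w), float32
//   F = torch.randint(0, 1000, (2000, 3), dtype=torch.int32)   # triangle vertex indices
//   D = torch.zeros(512, 512)                                  # depth prior, ignored when use_depth_prior == 0
//   findices, barycentric = crk.rasterize_image(V, F, D, 512, 512, 0.0, 0)
//   # findices:    (512, 512) int32, 1-based face ids with 0 = background
//   # barycentric: (512, 512, 3) float32 perspective-corrected barycentric weights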

View File

@@ -0,0 +1,54 @@
#ifndef RASTERIZER_H_
#define RASTERIZER_H_
#include <torch/extension.h>
#include <vector>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h> // For CUDA context
#define INT64 unsigned long long
#define MAXINT 2147483647
__host__ __device__ inline float calculateSignedArea2(float* a, float* b, float* c) {
return ((c[0] - a[0]) * (b[1] - a[1]) - (b[0] - a[0]) * (c[1] - a[1]));
}
__host__ __device__ inline void calculateBarycentricCoordinate(float* a, float* b, float* c, float* p,
float* barycentric)
{
float beta_tri = calculateSignedArea2(a, p, c);
float gamma_tri = calculateSignedArea2(a, b, p);
float area = calculateSignedArea2(a, b, c);
if (area == 0) {
barycentric[0] = -1.0;
barycentric[1] = -1.0;
barycentric[2] = -1.0;
return;
}
float tri_inv = 1.0 / area;
float beta = beta_tri * tri_inv;
float gamma = gamma_tri * tri_inv;
float alpha = 1.0 - beta - gamma;
barycentric[0] = alpha;
barycentric[1] = beta;
barycentric[2] = gamma;
}
__host__ __device__ inline bool isBarycentricCoordInBounds(float* barycentricCoord) {
return barycentricCoord[0] >= 0.0 && barycentricCoord[0] <= 1.0 &&
barycentricCoord[1] >= 0.0 && barycentricCoord[1] <= 1.0 &&
barycentricCoord[2] >= 0.0 && barycentricCoord[2] <= 1.0;
}
std::vector<torch::Tensor> rasterize_image_gpu(torch::Tensor V, torch::Tensor F, torch::Tensor D,
int width, int height, float occlusion_truncation, int use_depth_prior);
std::vector<std::vector<torch::Tensor>> build_hierarchy(std::vector<torch::Tensor> view_layer_positions, std::vector<torch::Tensor> view_layer_normals, int num_level, int resolution);
std::vector<std::vector<torch::Tensor>> build_hierarchy_with_feat(
std::vector<torch::Tensor> view_layer_positions,
std::vector<torch::Tensor> view_layer_normals,
std::vector<torch::Tensor> view_layer_feats,
int num_level, int resolution);
#endif

View File

@@ -0,0 +1,127 @@
#include "rasterizer.h"
__device__ void rasterizeTriangleGPU(int idx, float* vt0, float* vt1, float* vt2, int width, int height, INT64* zbuffer, float* d, float occlusion_truncation) {
float x_min = std::min(vt0[0], std::min(vt1[0],vt2[0]));
float x_max = std::max(vt0[0], std::max(vt1[0],vt2[0]));
float y_min = std::min(vt0[1], std::min(vt1[1],vt2[1]));
float y_max = std::max(vt0[1], std::max(vt1[1],vt2[1]));
for (int px = x_min; px < x_max + 1; ++px) {
if (px < 0 || px >= width)
continue;
for (int py = y_min; py < y_max + 1; ++py) {
if (py < 0 || py >= height)
continue;
float vt[2] = {px + 0.5f, py + 0.5f};
float baryCentricCoordinate[3];
calculateBarycentricCoordinate(vt0, vt1, vt2, vt, baryCentricCoordinate);
if (isBarycentricCoordInBounds(baryCentricCoordinate)) {
int pixel = py * width + px;
if (zbuffer == 0) {
atomicExch(&zbuffer[pixel], (INT64)(idx + 1));
continue;
}
float depth = baryCentricCoordinate[0] * vt0[2] + baryCentricCoordinate[1] * vt1[2] + baryCentricCoordinate[2] * vt2[2];
float depth_thres = 0;
if (d) {
depth_thres = d[pixel] * 0.49999f + 0.5f + occlusion_truncation;
}
int z_quantize = depth * (2<<17);
INT64 token = (INT64)z_quantize * MAXINT + (INT64)(idx + 1);
if (depth < depth_thres)
continue;
atomicMin(&zbuffer[pixel], token);
}
}
}
}
__global__ void barycentricFromImgcoordGPU(float* V, int* F, int* findices, INT64* zbuffer, int width, int height, int num_vertices, int num_faces,
float* barycentric_map)
{
int pix = blockIdx.x * blockDim.x + threadIdx.x;
if (pix >= width * height)
return;
INT64 f = zbuffer[pix] % MAXINT;
if (f == (MAXINT-1)) {
findices[pix] = 0;
barycentric_map[pix * 3] = 0;
barycentric_map[pix * 3 + 1] = 0;
barycentric_map[pix * 3 + 2] = 0;
return;
}
findices[pix] = f;
f -= 1;
float barycentric[3] = {0, 0, 0};
if (f >= 0) {
float vt[2] = {float(pix % width) + 0.5f, float(pix / width) + 0.5f};
float* vt0_ptr = V + (F[f * 3] * 4);
float* vt1_ptr = V + (F[f * 3 + 1] * 4);
float* vt2_ptr = V + (F[f * 3 + 2] * 4);
float vt0[2] = {(vt0_ptr[0] / vt0_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt0_ptr[1] / vt0_ptr[3]) * (height - 1) + 0.5f};
float vt1[2] = {(vt1_ptr[0] / vt1_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt1_ptr[1] / vt1_ptr[3]) * (height - 1) + 0.5f};
float vt2[2] = {(vt2_ptr[0] / vt2_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt2_ptr[1] / vt2_ptr[3]) * (height - 1) + 0.5f};
calculateBarycentricCoordinate(vt0, vt1, vt2, vt, barycentric);
barycentric[0] = barycentric[0] / vt0_ptr[3];
barycentric[1] = barycentric[1] / vt1_ptr[3];
barycentric[2] = barycentric[2] / vt2_ptr[3];
float w = 1.0f / (barycentric[0] + barycentric[1] + barycentric[2]);
barycentric[0] *= w;
barycentric[1] *= w;
barycentric[2] *= w;
}
barycentric_map[pix * 3] = barycentric[0];
barycentric_map[pix * 3 + 1] = barycentric[1];
barycentric_map[pix * 3 + 2] = barycentric[2];
}
__global__ void rasterizeImagecoordsKernelGPU(float* V, int* F, float* d, INT64* zbuffer, float occlusion_trunc, int width, int height, int num_vertices, int num_faces)
{
int f = blockIdx.x * blockDim.x + threadIdx.x;
if (f >= num_faces)
return;
float* vt0_ptr = V + (F[f * 3] * 4);
float* vt1_ptr = V + (F[f * 3 + 1] * 4);
float* vt2_ptr = V + (F[f * 3 + 2] * 4);
float vt0[3] = {(vt0_ptr[0] / vt0_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt0_ptr[1] / vt0_ptr[3]) * (height - 1) + 0.5f, vt0_ptr[2] / vt0_ptr[3] * 0.49999f + 0.5f};
float vt1[3] = {(vt1_ptr[0] / vt1_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt1_ptr[1] / vt1_ptr[3]) * (height - 1) + 0.5f, vt1_ptr[2] / vt1_ptr[3] * 0.49999f + 0.5f};
float vt2[3] = {(vt2_ptr[0] / vt2_ptr[3] * 0.5f + 0.5f) * (width - 1) + 0.5f, (0.5f + 0.5f * vt2_ptr[1] / vt2_ptr[3]) * (height - 1) + 0.5f, vt2_ptr[2] / vt2_ptr[3] * 0.49999f + 0.5f};
rasterizeTriangleGPU(f, vt0, vt1, vt2, width, height, zbuffer, d, occlusion_trunc);
}
std::vector<torch::Tensor> rasterize_image_gpu(torch::Tensor V, torch::Tensor F, torch::Tensor D,
int width, int height, float occlusion_truncation, int use_depth_prior)
{
int device_id = V.get_device();
cudaSetDevice(device_id);
int num_faces = F.size(0);
int num_vertices = V.size(0);
auto options = torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, device_id).requires_grad(false);
auto INT64_options = torch::TensorOptions().dtype(torch::kInt64).device(torch::kCUDA, device_id).requires_grad(false);
auto findices = torch::zeros({height, width}, options);
INT64 maxint = (INT64)MAXINT * (INT64)MAXINT + (MAXINT - 1);
auto z_min = torch::ones({height, width}, INT64_options) * (long)maxint;
if (!use_depth_prior) {
rasterizeImagecoordsKernelGPU<<<(num_faces+255)/256,256,0,at::cuda::getCurrentCUDAStream()>>>(V.data_ptr<float>(), F.data_ptr<int>(), 0,
(INT64*)z_min.data_ptr<long>(), occlusion_truncation, width, height, num_vertices, num_faces);
} else {
rasterizeImagecoordsKernelGPU<<<(num_faces+255)/256,256,0,at::cuda::getCurrentCUDAStream()>>>(V.data_ptr<float>(), F.data_ptr<int>(), D.data_ptr<float>(),
(INT64*)z_min.data_ptr<long>(), occlusion_truncation, width, height, num_vertices, num_faces);
}
auto float_options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, device_id).requires_grad(false);
auto barycentric = torch::zeros({height, width, 3}, float_options);
barycentricFromImgcoordGPU<<<(width * height + 255)/256, 256>>>(V.data_ptr<float>(), F.data_ptr<int>(),
findices.data_ptr<int>(), (INT64*)z_min.data_ptr<long>(), width, height, num_vertices, num_faces, barycentric.data_ptr<float>());
return {findices, barycentric};
}

View File

@@ -0,0 +1,40 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the repsective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
from setuptools import setup, find_packages
import torch
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
# build custom rasterizer
custom_rasterizer_module = CUDAExtension(
"custom_rasterizer_kernel",
[
"lib/custom_rasterizer_kernel/rasterizer.cpp",
"lib/custom_rasterizer_kernel/grid_neighbor.cpp",
"lib/custom_rasterizer_kernel/rasterizer_gpu.cu",
],
)
setup(
packages=find_packages(),
version="0.1",
name="custom_rasterizer",
include_package_data=True,
package_dir={"": "."},
ext_modules=[
custom_rasterizer_module,
],
cmdclass={"build_ext": BuildExtension},
)
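# Typical build/installation (illustrative; adjust to your environment): run
#   pip install .          # or: python setup.py install
# from the directory containing this setup.py. A CUDA toolchain matching the installed
# PyTorch build is required, since the extension includes a .cu source.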

hy3dpaint/demo.py Normal file
View File

@@ -0,0 +1,35 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the repsective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
from textureGenPipeline import Hunyuan3DPaintPipeline, Hunyuan3DPaintConfig
try:
from utils.torchvision_fix import apply_fix
apply_fix()
except ImportError:
print("Warning: torchvision_fix module not found, proceeding without compatibility fix")
except Exception as e:
print(f"Warning: Failed to apply torchvision fix: {e}")
if __name__ == "__main__":
max_num_view = 6 # can be 6 to 9
resolution = 512 # can be 768 or 512
conf = Hunyuan3DPaintConfig(max_num_view, resolution)
paint_pipeline = Hunyuan3DPaintPipeline(conf)
output_mesh_path = paint_pipeline(mesh_path="./assets/case_1/mesh.glb", image_path="./assets/case_1/image.png")
print(f"Output mesh path: {output_mesh_path}")

View File

@@ -0,0 +1,39 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the repsective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
from .pipeline import HunyuanPaintPipeline
from .model import HunyuanPaint
from .modules import (
Dino_v2,
Basic2p5DTransformerBlock,
ImageProjModel,
UNet2p5DConditionModel,
)
from .attn_processor import (
PoseRoPEAttnProcessor2_0,
SelfAttnProcessor2_0,
RefAttnProcessor2_0,
)
__all__ = [
'HunyuanPaintPipeline',
'HunyuanPaint',
'Dino_v2',
'Basic2p5DTransformerBlock',
'ImageProjModel',
'UNet2p5DConditionModel',
'PoseRoPEAttnProcessor2_0',
'SelfAttnProcessor2_0',
'RefAttnProcessor2_0',
]

View File

@@ -0,0 +1,839 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the repsective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Dict, Tuple, Union, Literal, List, Callable
from einops import rearrange
from diffusers.utils import deprecate
from diffusers.models.attention_processor import Attention, AttnProcessor
class AttnUtils:
"""
Shared utility functions for attention processing.
This class provides common operations used across different attention processors
to eliminate code duplication and improve maintainability.
"""
@staticmethod
def check_pytorch_compatibility():
"""
Check PyTorch compatibility for scaled_dot_product_attention.
Raises:
ImportError: If PyTorch version doesn't support scaled_dot_product_attention
"""
if not hasattr(F, "scaled_dot_product_attention"):
raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
@staticmethod
def handle_deprecation_warning(args, kwargs):
"""
Handle deprecation warning for the 'scale' argument.
Args:
args: Positional arguments passed to attention processor
kwargs: Keyword arguments passed to attention processor
"""
if len(args) > 0 or kwargs.get("scale", None) is not None:
deprecation_message = (
"The `scale` argument is deprecated and will be ignored."
"Please remove it, as passing it will raise an error in the future."
"`scale` should directly be passed while calling the underlying pipeline component"
"i.e., via `cross_attention_kwargs`."
)
deprecate("scale", "1.0.0", deprecation_message)
@staticmethod
def prepare_hidden_states(
hidden_states, attn, temb, spatial_norm_attr="spatial_norm", group_norm_attr="group_norm"
):
"""
Common preprocessing of hidden states for attention computation.
Args:
hidden_states: Input hidden states tensor
attn: Attention module instance
temb: Optional temporal embedding tensor
spatial_norm_attr: Attribute name for spatial normalization
group_norm_attr: Attribute name for group normalization
Returns:
Tuple of (processed_hidden_states, residual, input_ndim, shape_info)
"""
residual = hidden_states
spatial_norm = getattr(attn, spatial_norm_attr, None)
if spatial_norm is not None:
hidden_states = spatial_norm(hidden_states, temb)
input_ndim = hidden_states.ndim
if input_ndim == 4:
batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
else:
batch_size, channel, height, width = None, None, None, None
group_norm = getattr(attn, group_norm_attr, None)
if group_norm is not None:
hidden_states = group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
return hidden_states, residual, input_ndim, (batch_size, channel, height, width)
@staticmethod
def prepare_attention_mask(attention_mask, attn, sequence_length, batch_size):
"""
Prepare attention mask for scaled_dot_product_attention.
Args:
attention_mask: Input attention mask tensor or None
attn: Attention module instance
sequence_length: Length of the sequence
batch_size: Batch size
Returns:
Prepared attention mask tensor reshaped for multi-head attention
"""
if attention_mask is not None:
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
return attention_mask
@staticmethod
def reshape_qkv_for_attention(tensor, batch_size, attn_heads, head_dim):
"""
Reshape Q/K/V tensors for multi-head attention computation.
Args:
tensor: Input tensor to reshape
batch_size: Batch size
attn_heads: Number of attention heads
head_dim: Dimension per attention head
Returns:
Reshaped tensor with shape [batch_size, attn_heads, seq_len, head_dim]
"""
return tensor.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2)
@staticmethod
def apply_norms(query, key, norm_q, norm_k):
"""
Apply Q/K normalization layers if available.
Args:
query: Query tensor
key: Key tensor
norm_q: Query normalization layer (optional)
norm_k: Key normalization layer (optional)
Returns:
Tuple of (normalized_query, normalized_key)
"""
if norm_q is not None:
query = norm_q(query)
if norm_k is not None:
key = norm_k(key)
return query, key
@staticmethod
def finalize_output(hidden_states, input_ndim, shape_info, attn, residual, to_out):
"""
Common output processing including projection, dropout, reshaping, and residual connection.
Args:
hidden_states: Processed hidden states from attention
input_ndim: Original input tensor dimensions
shape_info: Tuple containing original shape information
attn: Attention module instance
residual: Residual connection tensor
to_out: Output projection layers [linear, dropout]
Returns:
Final output tensor after all processing steps
"""
batch_size, channel, height, width = shape_info
# Apply output projection and dropout
hidden_states = to_out[0](hidden_states)
hidden_states = to_out[1](hidden_states)
# Reshape back if needed
if input_ndim == 4:
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
# Apply residual connection
if attn.residual_connection:
hidden_states = hidden_states + residual
# Apply rescaling
hidden_states = hidden_states / attn.rescale_output_factor
return hidden_states
# Base class for attention processors (eliminating initialization duplication)
class BaseAttnProcessor(nn.Module):
"""
Base class for attention processors with common initialization.
This base class provides shared parameter initialization and module registration
functionality to reduce code duplication across different attention processor types.
"""
def __init__(
self,
query_dim: int,
pbr_setting: List[str] = ["albedo", "mr"],
cross_attention_dim: Optional[int] = None,
heads: int = 8,
kv_heads: Optional[int] = None,
dim_head: int = 64,
dropout: float = 0.0,
bias: bool = False,
upcast_attention: bool = False,
upcast_softmax: bool = False,
cross_attention_norm: Optional[str] = None,
cross_attention_norm_num_groups: int = 32,
qk_norm: Optional[str] = None,
added_kv_proj_dim: Optional[int] = None,
added_proj_bias: Optional[bool] = True,
norm_num_groups: Optional[int] = None,
spatial_norm_dim: Optional[int] = None,
out_bias: bool = True,
scale_qk: bool = True,
only_cross_attention: bool = False,
eps: float = 1e-5,
rescale_output_factor: float = 1.0,
residual_connection: bool = False,
_from_deprecated_attn_block: bool = False,
processor: Optional["AttnProcessor"] = None,
out_dim: int = None,
out_context_dim: int = None,
context_pre_only=None,
pre_only=False,
elementwise_affine: bool = True,
is_causal: bool = False,
**kwargs,
):
"""
Initialize base attention processor with common parameters.
Args:
query_dim: Dimension of query features
pbr_setting: List of PBR material types to process (e.g., ["albedo", "mr"])
cross_attention_dim: Dimension of cross-attention features (optional)
heads: Number of attention heads
kv_heads: Number of key-value heads for grouped query attention (optional)
dim_head: Dimension per attention head
dropout: Dropout rate
bias: Whether to use bias in linear projections
upcast_attention: Whether to upcast attention computation to float32
upcast_softmax: Whether to upcast softmax computation to float32
cross_attention_norm: Type of cross-attention normalization (optional)
cross_attention_norm_num_groups: Number of groups for cross-attention norm
qk_norm: Type of query-key normalization (optional)
added_kv_proj_dim: Dimension for additional key-value projections (optional)
added_proj_bias: Whether to use bias in additional projections
norm_num_groups: Number of groups for normalization (optional)
spatial_norm_dim: Dimension for spatial normalization (optional)
out_bias: Whether to use bias in output projection
scale_qk: Whether to scale query-key products
only_cross_attention: Whether to only perform cross-attention
eps: Small epsilon value for numerical stability
rescale_output_factor: Factor to rescale output values
residual_connection: Whether to use residual connections
_from_deprecated_attn_block: Flag for deprecated attention blocks
processor: Optional attention processor instance
out_dim: Output dimension (optional)
out_context_dim: Output context dimension (optional)
context_pre_only: Whether to only process context in pre-processing
pre_only: Whether to only perform pre-processing
elementwise_affine: Whether to use element-wise affine transformations
is_causal: Whether to use causal attention masking
**kwargs: Additional keyword arguments
"""
super().__init__()
AttnUtils.check_pytorch_compatibility()
# Store common attributes
self.pbr_setting = pbr_setting
self.n_pbr_tokens = len(self.pbr_setting)
self.inner_dim = out_dim if out_dim is not None else dim_head * heads
self.inner_kv_dim = self.inner_dim if kv_heads is None else dim_head * kv_heads
self.query_dim = query_dim
self.use_bias = bias
self.is_cross_attention = cross_attention_dim is not None
self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim
self.upcast_attention = upcast_attention
self.upcast_softmax = upcast_softmax
self.rescale_output_factor = rescale_output_factor
self.residual_connection = residual_connection
self.dropout = dropout
self.fused_projections = False
self.out_dim = out_dim if out_dim is not None else query_dim
self.out_context_dim = out_context_dim if out_context_dim is not None else query_dim
self.context_pre_only = context_pre_only
self.pre_only = pre_only
self.is_causal = is_causal
self._from_deprecated_attn_block = _from_deprecated_attn_block
self.scale_qk = scale_qk
self.scale = dim_head**-0.5 if self.scale_qk else 1.0
self.heads = out_dim // dim_head if out_dim is not None else heads
self.sliceable_head_dim = heads
self.added_kv_proj_dim = added_kv_proj_dim
self.only_cross_attention = only_cross_attention
self.added_proj_bias = added_proj_bias
# Validation
if self.added_kv_proj_dim is None and self.only_cross_attention:
raise ValueError(
"`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None."
"Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`."
)
def register_pbr_modules(self, module_types: List[str], **kwargs):
"""
Generic PBR module registration to eliminate code repetition.
Dynamically registers PyTorch modules for different PBR material types
based on the specified module types and PBR settings.
Args:
module_types: List of module types to register ("qkv", "v_only", "out", "add_kv")
**kwargs: Additional arguments for module configuration
"""
for pbr_token in self.pbr_setting:
if pbr_token == "albedo":
continue
for module_type in module_types:
if module_type == "qkv":
self.register_module(
f"to_q_{pbr_token}", nn.Linear(self.query_dim, self.inner_dim, bias=self.use_bias)
)
self.register_module(
f"to_k_{pbr_token}", nn.Linear(self.cross_attention_dim, self.inner_dim, bias=self.use_bias)
)
self.register_module(
f"to_v_{pbr_token}", nn.Linear(self.cross_attention_dim, self.inner_dim, bias=self.use_bias)
)
elif module_type == "v_only":
self.register_module(
f"to_v_{pbr_token}", nn.Linear(self.cross_attention_dim, self.inner_dim, bias=self.use_bias)
)
elif module_type == "out":
if not self.pre_only:
self.register_module(
f"to_out_{pbr_token}",
nn.ModuleList(
[
nn.Linear(self.inner_dim, self.out_dim, bias=kwargs.get("out_bias", True)),
nn.Dropout(self.dropout),
]
),
)
else:
self.register_module(f"to_out_{pbr_token}", None)
elif module_type == "add_kv":
if self.added_kv_proj_dim is not None:
self.register_module(
f"add_k_proj_{pbr_token}",
nn.Linear(self.added_kv_proj_dim, self.inner_kv_dim, bias=self.added_proj_bias),
)
self.register_module(
f"add_v_proj_{pbr_token}",
nn.Linear(self.added_kv_proj_dim, self.inner_kv_dim, bias=self.added_proj_bias),
)
else:
self.register_module(f"add_k_proj_{pbr_token}", None)
self.register_module(f"add_v_proj_{pbr_token}", None)
# Rotary Position Embedding utilities (specialized for PoseRoPE)
class RotaryEmbedding:
"""
Rotary position embedding utilities for 3D spatial attention.
Provides functions to compute and apply rotary position embeddings (RoPE)
for 1D, 3D spatial coordinates used in 3D-aware attention mechanisms.
"""
@staticmethod
def get_1d_rotary_pos_embed(dim: int, pos: torch.Tensor, theta: float = 10000.0, linear_factor=1.0, ntk_factor=1.0):
"""
Compute 1D rotary position embeddings.
Args:
dim: Embedding dimension (must be even)
pos: Position tensor
theta: Base frequency for rotary embeddings
linear_factor: Linear scaling factor
ntk_factor: NTK (Neural Tangent Kernel) scaling factor
Returns:
Tuple of (cos_embeddings, sin_embeddings)
"""
assert dim % 2 == 0
theta = theta * ntk_factor
freqs = (
1.0
/ (theta ** (torch.arange(0, dim, 2, dtype=pos.dtype, device=pos.device)[: (dim // 2)] / dim))
/ linear_factor
)
freqs = torch.outer(pos, freqs)
freqs_cos = freqs.cos().repeat_interleave(2, dim=1).float()
freqs_sin = freqs.sin().repeat_interleave(2, dim=1).float()
return freqs_cos, freqs_sin
@staticmethod
def get_3d_rotary_pos_embed(position, embed_dim, voxel_resolution, theta: int = 10000):
"""
Compute 3D rotary position embeddings for spatial coordinates.
Args:
position: 3D position tensor with shape [..., 3]
embed_dim: Embedding dimension
voxel_resolution: Resolution of the voxel grid
theta: Base frequency for rotary embeddings
Returns:
Tuple of (cos_embeddings, sin_embeddings) for 3D positions
"""
assert position.shape[-1] == 3
dim_xy = embed_dim // 8 * 3
dim_z = embed_dim // 8 * 2
grid = torch.arange(voxel_resolution, dtype=torch.float32, device=position.device)
freqs_xy = RotaryEmbedding.get_1d_rotary_pos_embed(dim_xy, grid, theta=theta)
freqs_z = RotaryEmbedding.get_1d_rotary_pos_embed(dim_z, grid, theta=theta)
xy_cos, xy_sin = freqs_xy
z_cos, z_sin = freqs_z
embed_flattn = position.view(-1, position.shape[-1])
x_cos = xy_cos[embed_flattn[:, 0], :]
x_sin = xy_sin[embed_flattn[:, 0], :]
y_cos = xy_cos[embed_flattn[:, 1], :]
y_sin = xy_sin[embed_flattn[:, 1], :]
z_cos = z_cos[embed_flattn[:, 2], :]
z_sin = z_sin[embed_flattn[:, 2], :]
cos = torch.cat((x_cos, y_cos, z_cos), dim=-1)
sin = torch.cat((x_sin, y_sin, z_sin), dim=-1)
cos = cos.view(*position.shape[:-1], embed_dim)
sin = sin.view(*position.shape[:-1], embed_dim)
return cos, sin
@staticmethod
def apply_rotary_emb(x: torch.Tensor, freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]]):
"""
Apply rotary position embeddings to input tensor.
Args:
x: Input tensor to apply rotary embeddings to
freqs_cis: Tuple of (cos_embeddings, sin_embeddings) or single tensor
Returns:
Tensor with rotary position embeddings applied
"""
cos, sin = freqs_cis
cos, sin = cos.to(x.device), sin.to(x.device)
cos = cos.unsqueeze(1)
sin = sin.unsqueeze(1)
x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1)
x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3)
out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype)
return out
# Core attention processing logic (eliminating major duplication)
class AttnCore:
"""
Core attention processing logic shared across processors.
This class provides the fundamental attention computation pipeline
that can be reused across different attention processor implementations.
"""
@staticmethod
def process_attention_base(
attn: Attention,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
temb: Optional[torch.Tensor] = None,
get_qkv_fn: Callable = None,
apply_rope_fn: Optional[Callable] = None,
**kwargs,
):
"""
Generic attention processing core shared across different processors.
This function implements the common attention computation pipeline including:
1. Hidden state preprocessing
2. Attention mask preparation
3. Q/K/V computation via provided function
4. Tensor reshaping for multi-head attention
5. Optional normalization and RoPE application
6. Scaled dot-product attention computation
Args:
attn: Attention module instance
hidden_states: Input hidden states tensor
encoder_hidden_states: Optional encoder hidden states for cross-attention
attention_mask: Optional attention mask tensor
temb: Optional temporal embedding tensor
get_qkv_fn: Function to compute Q, K, V tensors
apply_rope_fn: Optional function to apply rotary position embeddings
**kwargs: Additional keyword arguments passed to subfunctions
Returns:
Tuple containing (attention_output, residual, input_ndim, shape_info,
batch_size, num_heads, head_dim)
"""
# Prepare hidden states
hidden_states, residual, input_ndim, shape_info = AttnUtils.prepare_hidden_states(hidden_states, attn, temb)
batch_size, sequence_length, _ = (
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
)
# Prepare attention mask
attention_mask = AttnUtils.prepare_attention_mask(attention_mask, attn, sequence_length, batch_size)
# Get Q, K, V
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
elif attn.norm_cross:
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
query, key, value = get_qkv_fn(attn, hidden_states, encoder_hidden_states, **kwargs)
# Reshape for attention
inner_dim = key.shape[-1]
head_dim = inner_dim // attn.heads
query = AttnUtils.reshape_qkv_for_attention(query, batch_size, attn.heads, head_dim)
key = AttnUtils.reshape_qkv_for_attention(key, batch_size, attn.heads, head_dim)
value = AttnUtils.reshape_qkv_for_attention(value, batch_size, attn.heads, value.shape[-1] // attn.heads)
# Apply normalization
query, key = AttnUtils.apply_norms(query, key, getattr(attn, "norm_q", None), getattr(attn, "norm_k", None))
# Apply RoPE if provided
if apply_rope_fn is not None:
query, key = apply_rope_fn(query, key, head_dim, **kwargs)
# Compute attention
hidden_states = F.scaled_dot_product_attention(
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
)
return hidden_states, residual, input_ndim, shape_info, batch_size, attn.heads, head_dim
# Specific processor implementations (minimal unique code)
class PoseRoPEAttnProcessor2_0:
"""
Attention processor with Rotary Position Encoding (RoPE) for 3D spatial awareness.
This processor extends standard attention with 3D rotary position embeddings
to provide spatial awareness for 3D scene understanding tasks.
"""
def __init__(self):
"""Initialize the RoPE attention processor."""
AttnUtils.check_pytorch_compatibility()
def __call__(
self,
attn: Attention,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_indices: Dict = None,
temb: Optional[torch.Tensor] = None,
n_pbrs=1,
*args,
**kwargs,
) -> torch.Tensor:
"""
Apply RoPE-enhanced attention computation.
Args:
attn: Attention module instance
hidden_states: Input hidden states tensor
encoder_hidden_states: Optional encoder hidden states for cross-attention
attention_mask: Optional attention mask tensor
position_indices: Dictionary containing 3D position information for RoPE
temb: Optional temporal embedding tensor
n_pbrs: Number of PBR material types
*args: Additional positional arguments
**kwargs: Additional keyword arguments
Returns:
Attention output tensor with applied rotary position encodings
"""
AttnUtils.handle_deprecation_warning(args, kwargs)
def get_qkv(attn, hidden_states, encoder_hidden_states, **kwargs):
return attn.to_q(hidden_states), attn.to_k(encoder_hidden_states), attn.to_v(encoder_hidden_states)
def apply_rope(query, key, head_dim, **kwargs):
if position_indices is not None:
if head_dim in position_indices:
image_rotary_emb = position_indices[head_dim]
else:
image_rotary_emb = RotaryEmbedding.get_3d_rotary_pos_embed(
rearrange(
position_indices["voxel_indices"].unsqueeze(1).repeat(1, n_pbrs, 1, 1),
"b n_pbrs l c -> (b n_pbrs) l c",
),
head_dim,
voxel_resolution=position_indices["voxel_resolution"],
)
position_indices[head_dim] = image_rotary_emb
query = RotaryEmbedding.apply_rotary_emb(query, image_rotary_emb)
key = RotaryEmbedding.apply_rotary_emb(key, image_rotary_emb)
return query, key
# Core attention processing
hidden_states, residual, input_ndim, shape_info, batch_size, heads, head_dim = AttnCore.process_attention_base(
attn,
hidden_states,
encoder_hidden_states,
attention_mask,
temb,
get_qkv_fn=get_qkv,
apply_rope_fn=apply_rope,
position_indices=position_indices,
n_pbrs=n_pbrs,
)
# Finalize output
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, heads * head_dim)
hidden_states = hidden_states.to(hidden_states.dtype)
return AttnUtils.finalize_output(hidden_states, input_ndim, shape_info, attn, residual, attn.to_out)
class SelfAttnProcessor2_0(BaseAttnProcessor):
"""
Self-attention processor with PBR (Physically Based Rendering) material support.
This processor handles multiple PBR material types (e.g., albedo, metallic-roughness)
with separate attention computation paths for each material type.
"""
def __init__(self, **kwargs):
"""
Initialize self-attention processor with PBR support.
Args:
**kwargs: Arguments passed to BaseAttnProcessor initialization
"""
super().__init__(**kwargs)
self.register_pbr_modules(["qkv", "out", "add_kv"], **kwargs)
def process_single(
self,
attn: Attention,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
temb: Optional[torch.Tensor] = None,
token: Literal["albedo", "mr"] = "albedo",
multiple_devices=False,
*args,
**kwargs,
):
"""
Process attention for a single PBR material type.
Args:
attn: Attention module instance
hidden_states: Input hidden states tensor
encoder_hidden_states: Optional encoder hidden states for cross-attention
attention_mask: Optional attention mask tensor
temb: Optional temporal embedding tensor
token: PBR material type to process ("albedo", "mr", etc.)
multiple_devices: Whether to use multiple GPU devices
*args: Additional positional arguments
**kwargs: Additional keyword arguments
Returns:
Processed attention output for the specified PBR material type
"""
target = attn if token == "albedo" else attn.processor
token_suffix = "" if token == "albedo" else "_" + token
# Device management (if needed)
if multiple_devices:
device = torch.device("cuda:0") if token == "albedo" else torch.device("cuda:1")
for attr in [f"to_q{token_suffix}", f"to_k{token_suffix}", f"to_v{token_suffix}", f"to_out{token_suffix}"]:
getattr(target, attr).to(device)
def get_qkv(attn, hidden_states, encoder_hidden_states, **kwargs):
return (
getattr(target, f"to_q{token_suffix}")(hidden_states),
getattr(target, f"to_k{token_suffix}")(encoder_hidden_states),
getattr(target, f"to_v{token_suffix}")(encoder_hidden_states),
)
# Core processing using shared logic
hidden_states, residual, input_ndim, shape_info, batch_size, heads, head_dim = AttnCore.process_attention_base(
attn, hidden_states, encoder_hidden_states, attention_mask, temb, get_qkv_fn=get_qkv
)
# Finalize
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, heads * head_dim)
hidden_states = hidden_states.to(hidden_states.dtype)
return AttnUtils.finalize_output(
hidden_states, input_ndim, shape_info, attn, residual, getattr(target, f"to_out{token_suffix}")
)
def __call__(
self,
attn: Attention,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
temb: Optional[torch.Tensor] = None,
*args,
**kwargs,
) -> torch.Tensor:
"""
Apply self-attention with PBR material processing.
Processes multiple PBR material types sequentially, applying attention
computation for each material type separately and combining results.
Args:
attn: Attention module instance
hidden_states: Input hidden states tensor with PBR dimension
encoder_hidden_states: Optional encoder hidden states for cross-attention
attention_mask: Optional attention mask tensor
temb: Optional temporal embedding tensor
*args: Additional positional arguments
**kwargs: Additional keyword arguments
Returns:
Combined attention output for all PBR material types
"""
AttnUtils.handle_deprecation_warning(args, kwargs)
B = hidden_states.size(0)
pbr_hidden_states = torch.split(hidden_states, 1, dim=1)
# Process each PBR setting
results = []
for token, pbr_hs in zip(self.pbr_setting, pbr_hidden_states):
processed_hs = rearrange(pbr_hs, "b n_pbrs n l c -> (b n_pbrs n) l c").to("cuda:0")
result = self.process_single(attn, processed_hs, None, attention_mask, temb, token, False)
results.append(result)
outputs = [rearrange(result, "(b n_pbrs n) l c -> b n_pbrs n l c", b=B, n_pbrs=1) for result in results]
return torch.cat(outputs, dim=1)
class RefAttnProcessor2_0(BaseAttnProcessor):
"""
Reference attention processor with shared value computation across PBR materials.
This processor computes query and key once, but uses separate value projections
for different PBR material types, enabling efficient multi-material processing.
"""
def __init__(self, **kwargs):
"""
Initialize reference attention processor.
Args:
**kwargs: Arguments passed to BaseAttnProcessor initialization
"""
super().__init__(**kwargs)
self.pbr_settings = self.pbr_setting # Alias for compatibility
self.register_pbr_modules(["v_only", "out"], **kwargs)
def __call__(
self,
attn: Attention,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
temb: Optional[torch.Tensor] = None,
*args,
**kwargs,
) -> torch.Tensor:
"""
Apply reference attention with shared Q/K and separate V projections.
This method computes query and key tensors once and reuses them across
all PBR material types, while using separate value projections for each
material type to maintain material-specific information.
Args:
attn: Attention module instance
hidden_states: Input hidden states tensor
encoder_hidden_states: Optional encoder hidden states for cross-attention
attention_mask: Optional attention mask tensor
temb: Optional temporal embedding tensor
*args: Additional positional arguments
**kwargs: Additional keyword arguments
Returns:
Stacked attention output for all PBR material types
"""
AttnUtils.handle_deprecation_warning(args, kwargs)
def get_qkv(attn, hidden_states, encoder_hidden_states, **kwargs):
query = attn.to_q(hidden_states)
key = attn.to_k(encoder_hidden_states)
# Concatenate values from all PBR settings
value_list = [attn.to_v(encoder_hidden_states)]
for token in ["_" + token for token in self.pbr_settings if token != "albedo"]:
value_list.append(getattr(attn.processor, f"to_v{token}")(encoder_hidden_states))
value = torch.cat(value_list, dim=-1)
return query, key, value
# Core processing
hidden_states, residual, input_ndim, shape_info, batch_size, heads, head_dim = AttnCore.process_attention_base(
attn, hidden_states, encoder_hidden_states, attention_mask, temb, get_qkv_fn=get_qkv
)
# Split and process each PBR setting output
hidden_states_list = torch.split(hidden_states, head_dim, dim=-1)
output_hidden_states_list = []
for i, hs in enumerate(hidden_states_list):
hs = hs.transpose(1, 2).reshape(batch_size, -1, heads * head_dim).to(hs.dtype)
token_suffix = "_" + self.pbr_settings[i] if self.pbr_settings[i] != "albedo" else ""
target = attn if self.pbr_settings[i] == "albedo" else attn.processor
hs = AttnUtils.finalize_output(
hs, input_ndim, shape_info, attn, residual, getattr(target, f"to_out{token_suffix}")
)
output_hidden_states_list.append(hs)
return torch.stack(output_hidden_states_list, dim=1)

View File

@@ -0,0 +1,623 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import os
# import ipdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from tqdm import tqdm
from torchvision.transforms import v2
from torchvision.utils import make_grid, save_image
from einops import rearrange
from diffusers import (
DiffusionPipeline,
EulerAncestralDiscreteScheduler,
DDPMScheduler,
UNet2DConditionModel,
ControlNetModel,
)
from .pipeline import UNet2p5DConditionModel
from .modules import Dino_v2
import math
def extract_into_tensor(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
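# Illustrative shape example (not from the original code): with a noise schedule `a` of shape
# (1000,), timesteps `t` of shape (B,), and x_shape == (B, 4, 40, 40), the gathered per-sample
# coefficients are returned as (B, 1, 1, 1) so they broadcast against the latent tensor.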
class HunyuanPaint(pl.LightningModule):
def __init__(
self,
stable_diffusion_config,
control_net_config=None,
num_view=6,
view_size=320,
drop_cond_prob=0.1,
with_normal_map=None,
with_position_map=None,
pbr_settings=["albedo", "mr"],
**kwargs,
):
"""Initializes the HunyuanPaint Lightning Module.
Args:
stable_diffusion_config: Configuration for loading the Stable Diffusion pipeline
control_net_config: Configuration for ControlNet (optional)
num_view: Number of views to process
view_size: Size of input views (height/width)
drop_cond_prob: Probability of dropping conditioning input during training
with_normal_map: Flag indicating whether normal maps are used
with_position_map: Flag indicating whether position maps are used
pbr_settings: List of PBR materials to generate (e.g., albedo, metallic-roughness)
**kwargs: Additional keyword arguments
"""
super(HunyuanPaint, self).__init__()
self.num_view = num_view
self.view_size = view_size
self.drop_cond_prob = drop_cond_prob
self.pbr_settings = pbr_settings
# init modules
pipeline = DiffusionPipeline.from_pretrained(**stable_diffusion_config)
pipeline.set_pbr_settings(self.pbr_settings)
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
pipeline.scheduler.config, timestep_spacing="trailing"
)
self.with_normal_map = with_normal_map
self.with_position_map = with_position_map
self.pipeline = pipeline
self.pipeline.vae.use_slicing = True
train_sched = DDPMScheduler.from_config(self.pipeline.scheduler.config)
if isinstance(self.pipeline.unet, UNet2DConditionModel):
self.pipeline.unet = UNet2p5DConditionModel(
self.pipeline.unet, train_sched, self.pipeline.scheduler, self.pbr_settings
)
self.train_scheduler = train_sched # use ddpm scheduler during training
self.register_schedule()
pipeline.set_learned_parameters()
if control_net_config is not None:
pipeline.unet = pipeline.unet.bfloat16().requires_grad_(control_net_config.train_unet)
self.pipeline.add_controlnet(
ControlNetModel.from_pretrained(control_net_config.pretrained_model_name_or_path),
conditioning_scale=0.75,
)
self.unet = pipeline.unet
self.pipeline.set_progress_bar_config(disable=True)
self.pipeline.vae = self.pipeline.vae.bfloat16()
self.pipeline.text_encoder = self.pipeline.text_encoder.bfloat16()
if self.unet.use_dino:
self.dino_v2 = Dino_v2("facebook/dinov2-giant")
self.dino_v2 = self.dino_v2.bfloat16()
self.validation_step_outputs = []
def register_schedule(self):
self.num_timesteps = self.train_scheduler.config.num_train_timesteps
betas = self.train_scheduler.betas.detach().cpu()
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = torch.cat([torch.ones(1, dtype=torch.float64), alphas_cumprod[:-1]], 0)
self.register_buffer("betas", betas.float())
self.register_buffer("alphas_cumprod", alphas_cumprod.float())
self.register_buffer("alphas_cumprod_prev", alphas_cumprod_prev.float())
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod).float())
self.register_buffer("sqrt_one_minus_alphas_cumprod", torch.sqrt(1 - alphas_cumprod).float())
self.register_buffer("sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod).float())
self.register_buffer("sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod - 1).float())
def on_fit_start(self):
device = torch.device(f"cuda:{self.local_rank}")
self.pipeline.to(device)
if self.global_rank == 0:
os.makedirs(os.path.join(self.logdir, "images_val"), exist_ok=True)
def prepare_batch_data(self, batch):
"""Preprocesses a batch of input data for training/inference.
Args:
batch: Raw input batch dictionary
Returns:
tuple: Contains:
- cond_imgs: Primary conditioning images (B, 1, C, H, W)
- cond_imgs_another: Secondary conditioning images (B, 1, C, H, W)
- target_imgs: Dictionary of target PBR images resized and clamped
- images_normal: Preprocessed normal maps (if available)
- images_position: Preprocessed position maps (if available)
"""
images_cond = batch["images_cond"].to(self.device) # (B, M, C, H, W), where M is the number of reference images
cond_imgs, cond_imgs_another = images_cond[:, 0:1, ...], images_cond[:, 1:2, ...]
cond_size = self.view_size
cond_imgs = v2.functional.resize(cond_imgs, cond_size, interpolation=3, antialias=True).clamp(0, 1)
cond_imgs_another = v2.functional.resize(cond_imgs_another, cond_size, interpolation=3, antialias=True).clamp(
0, 1
)
target_imgs = {}
for pbr_token in self.pbr_settings:
target_imgs[pbr_token] = batch[f"images_{pbr_token}"].to(self.device)
target_imgs[pbr_token] = v2.functional.resize(
target_imgs[pbr_token], self.view_size, interpolation=3, antialias=True
).clamp(0, 1)
images_normal = None
if "images_normal" in batch:
images_normal = batch["images_normal"] # (B, N, C, H, W)
images_normal = v2.functional.resize(images_normal, self.view_size, interpolation=3, antialias=True).clamp(
0, 1
)
images_normal = [images_normal]
images_position = None
if "images_position" in batch:
images_position = batch["images_position"] # (B, N, C, H, W)
images_position = v2.functional.resize(
images_position, self.view_size, interpolation=3, antialias=True
).clamp(0, 1)
images_position = [images_position]
return cond_imgs, cond_imgs_another, target_imgs, images_normal, images_position
@torch.no_grad()
def forward_text_encoder(self, prompts):
device = next(self.pipeline.vae.parameters()).device
text_embeds = self.pipeline.encode_prompt(prompts, device, 1, False)[0]
return text_embeds
@torch.no_grad()
def encode_images(self, images):
"""Encodes input images into latent representations using the VAE.
Handles both standard input (B, N, C, H, W) and PBR input (B, N_pbrs, N, C, H, W)
Maintains original batch structure in output latents.
Args:
images: Input images tensor
Returns:
torch.Tensor: Latent representations with original batch dimensions preserved
"""
B = images.shape[0]
image_ndims = images.ndim
if image_ndims != 5:
N_pbrs, N = images.shape[1:3]
images = (
rearrange(images, "b n c h w -> (b n) c h w")
if image_ndims == 5
else rearrange(images, "b n_pbrs n c h w -> (b n_pbrs n) c h w")
)
dtype = next(self.pipeline.vae.parameters()).dtype
images = (images - 0.5) * 2.0
posterior = self.pipeline.vae.encode(images.to(dtype)).latent_dist
latents = posterior.sample() * self.pipeline.vae.config.scaling_factor
latents = (
rearrange(latents, "(b n) c h w -> b n c h w", b=B)
if image_ndims == 5
else rearrange(latents, "(b n_pbrs n) c h w -> b n_pbrs n c h w", b=B, n_pbrs=N_pbrs)
)
return latents
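    # Shape sketch (illustrative, assuming the stock SD VAE with 4 latent channels and an 8x
    # spatial downsample): (B, N, 3, H, W) -> (B, N, 4, H/8, W/8), and the PBR variant
    # (B, N_pbrs, N, 3, H, W) -> (B, N_pbrs, N, 4, H/8, W/8).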
def forward_unet(self, latents, t, **cached_condition):
"""Runs the UNet model to predict noise/latent residuals.
Args:
latents: Noisy latent representations (B, C, H, W)
t: Timestep tensor (B,)
**cached_condition: Dictionary of conditioning inputs (text embeds, reference images, etc)
Returns:
torch.Tensor: UNet output (predicted noise or velocity)
"""
dtype = next(self.unet.parameters()).dtype
latents = latents.to(dtype)
shading_embeds = cached_condition["shading_embeds"]
pred_noise = self.pipeline.unet(latents, t, encoder_hidden_states=shading_embeds, **cached_condition)
return pred_noise[0]
def predict_start_from_z_and_v(self, x_t, t, v):
"""
Predicts clean image (x0) from noisy latents (x_t) and
velocity prediction (v) using the v-prediction formula.
Args:
x_t: Noisy latents at timestep t
t: Current timestep
v: Predicted velocity (v) from UNet
Returns:
torch.Tensor: Predicted clean image (x0)
"""
return (
extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
)
def get_v(self, x, noise, t):
"""Computes the target velocity (v) for v-prediction training.
Args:
x: Clean latents (x0)
noise: Added noise
t: Current timestep
Returns:
torch.Tensor: Target velocity
"""
return (
extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
)
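    # Worked relation between the two helpers above (standard v-parameterization):
    #   v      = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x0
    #   x0_hat = sqrt(alpha_bar_t) * x_t - sqrt(1 - alpha_bar_t) * v
    # Substituting x_t = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps into the second
    # line recovers x0 exactly, which is why predict_start_from_z_and_v inverts get_v.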
def training_step(self, batch, batch_idx):
"""Performs a single training step with both conditioning paths.
Implements:
1. Dual-conditioning path training (main ref + secondary ref)
2. Velocity-prediction with consistency loss
3. Conditional dropout for robust learning
4. PBR-specific losses (albedo/metallic-roughness)
Args:
batch: Input batch from dataloader
batch_idx: Index of current batch
Returns:
torch.Tensor: Combined loss value
"""
cond_imgs, cond_imgs_another, target_imgs, normal_imgs, position_imgs = self.prepare_batch_data(batch)
B, N_ref = cond_imgs.shape[:2]
_, N_gen, _, H, W = target_imgs["albedo"].shape
N_pbrs = len(self.pbr_settings)
t = torch.randint(0, self.num_timesteps, size=(B,)).long().to(self.device)
t = t.unsqueeze(-1).repeat(1, N_pbrs, N_gen)
t = rearrange(t, "b n_pbrs n -> (b n_pbrs n)")
all_target_pbrs = []
for pbr_token in self.pbr_settings:
all_target_pbrs.append(target_imgs[pbr_token])
all_target_pbrs = torch.stack(all_target_pbrs, dim=0).transpose(1, 0)
gen_latents = self.encode_images(all_target_pbrs) #! B, N_pbrs N C H W
ref_latents = self.encode_images(cond_imgs) #! B, M, C, H, W
ref_latents_another = self.encode_images(cond_imgs_another) #! B, M, C, H, W
all_shading_tokens = []
for token in self.pbr_settings:
if token in ["albedo", "mr"]:
all_shading_tokens.append(
getattr(self.unet, f"learned_text_clip_{token}").unsqueeze(dim=0).repeat(B, 1, 1)
)
shading_embeds = torch.stack(all_shading_tokens, dim=1)
if self.unet.use_dino:
dino_hidden_states = self.dino_v2(cond_imgs[:, :1, ...])
dino_hidden_states_another = self.dino_v2(cond_imgs_another[:, :1, ...])
gen_latents = rearrange(gen_latents, "b n_pbrs n c h w -> (b n_pbrs n) c h w")
noise = torch.randn_like(gen_latents).to(self.device)
latents_noisy = self.train_scheduler.add_noise(gen_latents, noise, t).to(self.device)
latents_noisy = rearrange(latents_noisy, "(b n_pbrs n) c h w -> b n_pbrs n c h w", b=B, n_pbrs=N_pbrs)
cached_condition = {}
if normal_imgs is not None:
normal_embeds = self.encode_images(normal_imgs[0])
cached_condition["embeds_normal"] = normal_embeds #! B, N, C, H, W
if position_imgs is not None:
position_embeds = self.encode_images(position_imgs[0])
cached_condition["embeds_position"] = position_embeds #! B, N, C, H, W
cached_condition["position_maps"] = position_imgs[0] #! B, N, C, H, W
for b in range(B):
prob = np.random.rand()
if prob < self.drop_cond_prob:
if "normal_imgs" in cached_condition:
cached_condition["embeds_normal"][b, ...] = torch.zeros_like(
cached_condition["embeds_normal"][b, ...]
)
if "position_imgs" in cached_condition:
cached_condition["embeds_position"][b, ...] = torch.zeros_like(
cached_condition["embeds_position"][b, ...]
)
prob = np.random.rand()
if prob < self.drop_cond_prob:
if "position_maps" in cached_condition:
cached_condition["position_maps"][b, ...] = torch.zeros_like(
cached_condition["position_maps"][b, ...]
)
prob = np.random.rand()
if prob < self.drop_cond_prob:
dino_hidden_states[b, ...] = torch.zeros_like(dino_hidden_states[b, ...])
prob = np.random.rand()
if prob < self.drop_cond_prob:
dino_hidden_states_another[b, ...] = torch.zeros_like(dino_hidden_states_another[b, ...])
# MVA & Ref Attention
prob = np.random.rand()
cached_condition["mva_scale"] = 1.0
cached_condition["ref_scale"] = 1.0
if prob < self.drop_cond_prob:
cached_condition["mva_scale"] = 0.0
cached_condition["ref_scale"] = 0.0
elif prob > 1.0 - self.drop_cond_prob:
prob = np.random.rand()
if prob < 0.5:
cached_condition["mva_scale"] = 0.0
else:
cached_condition["ref_scale"] = 0.0
else:
pass
if self.train_scheduler.config.prediction_type == "v_prediction":
cached_condition["shading_embeds"] = shading_embeds
cached_condition["ref_latents"] = ref_latents
cached_condition["dino_hidden_states"] = dino_hidden_states
v_pred = self.forward_unet(latents_noisy, t, **cached_condition)
v_pred_albedo, v_pred_mr = torch.split(
rearrange(
v_pred, "(b n_pbr n) c h w -> b n_pbr n c h w", n_pbr=len(self.pbr_settings), n=self.num_view
),
1,
dim=1,
)
v_target = self.get_v(gen_latents, noise, t)
v_target_albedo, v_target_mr = torch.split(
rearrange(
v_target, "(b n_pbr n) c h w -> b n_pbr n c h w", n_pbr=len(self.pbr_settings), n=self.num_view
),
1,
dim=1,
)
albedo_loss_1, _ = self.compute_loss(v_pred_albedo, v_target_albedo)
mr_loss_1, _ = self.compute_loss(v_pred_mr, v_target_mr)
cached_condition["ref_latents"] = ref_latents_another
cached_condition["dino_hidden_states"] = dino_hidden_states_another
v_pred_another = self.forward_unet(latents_noisy, t, **cached_condition)
v_pred_another_albedo, v_pred_another_mr = torch.split(
rearrange(
v_pred_another,
"(b n_pbr n) c h w -> b n_pbr n c h w",
n_pbr=len(self.pbr_settings),
n=self.num_view,
),
1,
dim=1,
)
albedo_loss_2, _ = self.compute_loss(v_pred_another_albedo, v_target_albedo)
mr_loss_2, _ = self.compute_loss(v_pred_another_mr, v_target_mr)
consistency_loss, _ = self.compute_loss(v_pred_another, v_pred)
albedo_loss = (albedo_loss_1 + albedo_loss_2) * 0.5
mr_loss = (mr_loss_1 + mr_loss_2) * 0.5
log_loss_dict = {}
log_loss_dict.update({f"train/albedo_loss": albedo_loss})
log_loss_dict.update({f"train/mr_loss": mr_loss})
log_loss_dict.update({f"train/cons_loss": consistency_loss})
loss_dict = log_loss_dict
elif self.train_scheduler.config.prediction_type == "epsilon":
e_pred = self.forward_unet(latents_noisy, t, **cached_condition)
loss, loss_dict = self.compute_loss(e_pred, noise)
else:
raise f"No {self.train_scheduler.config.prediction_type}"
# logging
self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False)
lr = self.optimizers().param_groups[0]["lr"]
self.log("lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
        if self.train_scheduler.config.prediction_type == "v_prediction":
            return 0.85 * (albedo_loss + mr_loss) + 0.15 * consistency_loss
        return loss
def compute_loss(self, noise_pred, noise_gt):
loss = F.mse_loss(noise_pred, noise_gt)
prefix = "train"
loss_dict = {}
loss_dict.update({f"{prefix}/loss": loss})
return loss, loss_dict
@torch.no_grad()
def validation_step(self, batch, batch_idx):
"""Performs validation on a single batch.
Generates predicted images using:
1. Reference conditioning images
2. Optional normal/position maps
3. Frozen DINO features (if enabled)
4. Text prompt conditioning
Compares predictions against ground truth targets and prepares visualization.
Stores results for epoch-level aggregation.
Args:
batch: Input batch from validation dataloader
batch_idx: Index of current batch
"""
# [Validation image generation and comparison logic...]
# Key steps:
# 1. Preprocess conditioning images to PIL format
# 2. Set up conditioning inputs (normal maps, position maps, DINO features)
# 3. Run pipeline inference with fixed prompt ("high quality")
# 4. Decode latent outputs to image space
# 5. Arrange predictions and ground truths for visualization
cond_imgs_tensor, _, target_imgs, normal_imgs, position_imgs = self.prepare_batch_data(batch)
resolution = self.view_size
image_pils = []
for i in range(cond_imgs_tensor.shape[0]):
image_pils.append([])
for j in range(cond_imgs_tensor.shape[1]):
image_pils[-1].append(v2.functional.to_pil_image(cond_imgs_tensor[i, j, ...]))
outputs, gts = [], []
for idx in range(len(image_pils)):
cond_imgs = image_pils[idx]
cached_condition = dict(num_in_batch=self.num_view, N_pbrs=len(self.pbr_settings))
if normal_imgs is not None:
cached_condition["images_normal"] = normal_imgs[0][idx, ...].unsqueeze(0)
if position_imgs is not None:
cached_condition["images_position"] = position_imgs[0][idx, ...].unsqueeze(0)
if self.pipeline.unet.use_dino:
                dino_hidden_states = self.dino_v2(cond_imgs)
cached_condition["dino_hidden_states"] = dino_hidden_states
latent = self.pipeline(
cond_imgs,
prompt="high quality",
num_inference_steps=30,
output_type="latent",
height=resolution,
width=resolution,
**cached_condition,
).images
image = self.pipeline.vae.decode(latent / self.pipeline.vae.config.scaling_factor, return_dict=False)[
0
] # [-1, 1]
image = (image * 0.5 + 0.5).clamp(0, 1)
image = rearrange(
image, "(b n_pbr n) c h w -> b n_pbr n c h w", n_pbr=len(self.pbr_settings), n=self.num_view
)
image = torch.cat((torch.ones_like(image[:, :, :1, ...]) * 0.5, image), dim=2)
image = rearrange(image, "b n_pbr n c h w -> (b n_pbr n) c h w")
image = rearrange(
image,
"(b n_pbr n) c h w -> b c (n_pbr h) (n w)",
b=1,
n_pbr=len(self.pbr_settings),
n=self.num_view + 1,
)
outputs.append(image)
all_target_pbrs = []
for pbr_token in self.pbr_settings:
all_target_pbrs.append(target_imgs[pbr_token])
all_target_pbrs = torch.stack(all_target_pbrs, dim=0).transpose(1, 0)
all_target_pbrs = torch.cat(
(cond_imgs_tensor.unsqueeze(1).repeat(1, len(self.pbr_settings), 1, 1, 1, 1), all_target_pbrs), dim=2
)
all_target_pbrs = rearrange(all_target_pbrs, "b n_pbrs n c h w -> b c (n_pbrs h) (n w)")
gts = all_target_pbrs
outputs = torch.cat(outputs, dim=0).to(self.device)
images = torch.cat([gts, outputs], dim=-2)
self.validation_step_outputs.append(images)
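    # Layout of each stored visualization (descriptive): the ground-truth grid is stacked above
    # the prediction grid; within each grid, PBR materials run top-to-bottom and views run
    # left-to-right, with the reference image (ground truth) or a mid-gray placeholder
    # (prediction) in the first column.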
@torch.no_grad()
def on_validation_epoch_end(self):
"""Aggregates validation results at epoch end.
Gathers outputs from all GPUs (if distributed training),
creates a unified visualization grid, and saves to disk.
Only rank 0 process performs saving.
"""
# [Result aggregation and visualization...]
# Key steps:
# 1. Gather validation outputs from all processes
# 2. Create image grid combining ground truths and predictions
# 3. Save visualization with step-numbered filename
# 4. Clear memory for next validation cycle
images = torch.cat(self.validation_step_outputs, dim=0)
all_images = self.all_gather(images)
all_images = rearrange(all_images, "r b c h w -> (r b) c h w")
if self.global_rank == 0:
grid = make_grid(all_images, nrow=8, normalize=True, value_range=(0, 1))
save_image(grid, os.path.join(self.logdir, "images_val", f"val_{self.global_step:07d}.png"))
self.validation_step_outputs.clear() # free memory
def configure_optimizers(self):
lr = self.learning_rate
optimizer = torch.optim.AdamW(self.unet.parameters(), lr=lr)
def lr_lambda(step):
warm_up_step = 1000
T_step = 9000
gamma = 0.9
min_lr = 0.1 if step >= warm_up_step else 0.0
max_lr = 1.0
normalized_step = step % (warm_up_step + T_step)
current_max_lr = max_lr * gamma ** (step // (warm_up_step + T_step))
if current_max_lr < min_lr:
current_max_lr = min_lr
if normalized_step < warm_up_step:
lr_step = min_lr + (normalized_step / warm_up_step) * (current_max_lr - min_lr)
else:
step_wc_wp = normalized_step - warm_up_step
ratio = step_wc_wp / T_step
lr_step = min_lr + 0.5 * (current_max_lr - min_lr) * (1 + math.cos(math.pi * ratio))
return lr_step
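        # Illustrative multiplier values for this schedule (warm_up_step=1000, T_step=9000,
        # gamma=0.9): linear ramp 0.0 -> 1.0 over the first 1000 steps, cosine decay 1.0 -> 0.1
        # over the next 9000, then the cycle restarts with a peak of 0.9 (e.g. ~0.5 at step 500,
        # 1.0 at step 1000, ~0.55 at step 5500).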
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
lr_scheduler_config = {
"scheduler": lr_scheduler,
"interval": "step",
"frequency": 1,
"monitor": "val_loss",
"strict": False,
"name": None,
}
return {"optimizer": optimizer, "lr_scheduler": lr_scheduler_config}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,736 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
from typing import Any, Dict, Optional
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
import numpy
import torch
import torch.utils.checkpoint
import torch.distributed
import numpy as np
import transformers
from PIL import Image
from einops import rearrange
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
from typing import Any, Callable, Dict, List, Optional, Union, Tuple
import diffusers
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
StableDiffusionPipeline,
retrieve_timesteps,
rescale_noise_cfg,
)
from diffusers.utils import deprecate
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import PipelineImageInput
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
from .modules import UNet2p5DConditionModel
from .attn_processor import SelfAttnProcessor2_0, RefAttnProcessor2_0, PoseRoPEAttnProcessor2_0
__all__ = [
"HunyuanPaintPipeline",
"UNet2p5DConditionModel",
"SelfAttnProcessor2_0",
"RefAttnProcessor2_0",
"PoseRoPEAttnProcessor2_0",
]
def to_rgb_image(maybe_rgba: Image.Image):
if maybe_rgba.mode == "RGB":
return maybe_rgba
elif maybe_rgba.mode == "RGBA":
rgba = maybe_rgba
img = numpy.random.randint(127, 128, size=[rgba.size[1], rgba.size[0], 3], dtype=numpy.uint8)
img = Image.fromarray(img, "RGB")
img.paste(rgba, mask=rgba.getchannel("A"))
return img
else:
raise ValueError("Unsupported image type.", maybe_rgba.mode)
class HunyuanPaintPipeline(StableDiffusionPipeline):
"""Custom pipeline for multiview PBR texture generation.
Extends Stable Diffusion with:
- Material-specific conditioning
- Multiview processing
- Position-aware attention
- 2.5D UNet integration
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
feature_extractor: CLIPImageProcessor,
safety_checker=None,
use_torch_compile=False,
):
DiffusionPipeline.__init__(self)
safety_checker = None
self.register_modules(
vae=torch.compile(vae) if use_torch_compile else vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=torch.compile(feature_extractor) if use_torch_compile else feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
if isinstance(self.unet, UNet2DConditionModel):
self.unet = UNet2p5DConditionModel(self.unet, None, self.scheduler)
def eval(self):
self.unet.eval()
self.vae.eval()
def set_pbr_settings(self, pbr_settings: List[str]):
self.pbr_settings = pbr_settings
def set_learned_parameters(self):
"""Configures parameter freezing strategy.
Freezes:
- Standard attention layers
- Dual-stream reference UNet
Unfreezes:
- Material-specific parameters
- DINO integration components
"""
freezed_names = ["attn1", "unet_dual"]
added_learned_names = ["albedo", "mr", "dino"]
for name, params in self.unet.named_parameters():
if any(freeze_name in name for freeze_name in freezed_names) and all(
learned_name not in name for learned_name in added_learned_names
):
params.requires_grad = False
else:
params.requires_grad = True
def prepare(self):
if isinstance(self.unet, UNet2DConditionModel):
self.unet = UNet2p5DConditionModel(self.unet, None, self.scheduler).eval()
@torch.no_grad()
def encode_images(self, images):
"""Encodes multiview image batches into latent space.
Args:
images: Input images [B, N_views, C, H, W]
Returns:
torch.Tensor: Latent representations [B, N_views, C, H_latent, W_latent]
"""
B = images.shape[0]
images = rearrange(images, "b n c h w -> (b n) c h w")
dtype = next(self.vae.parameters()).dtype
images = (images - 0.5) * 2.0
posterior = self.vae.encode(images.to(dtype)).latent_dist
latents = posterior.sample() * self.vae.config.scaling_factor
latents = rearrange(latents, "(b n) c h w -> b n c h w", b=B)
return latents
@torch.no_grad()
def __call__(
self,
images=None,
prompt=None,
negative_prompt="watermark, ugly, deformed, noisy, blurry, low contrast",
*args,
num_images_per_prompt: Optional[int] = 1,
guidance_scale=3.0,
output_type: Optional[str] = "pil",
width=512,
height=512,
num_inference_steps=15,
return_dict=True,
sync_condition=None,
**cached_condition,
):
"""Main generation method for multiview PBR textures.
Steps:
1. Input validation and preparation
2. Reference image encoding
3. Condition processing (normal/position maps)
4. Prompt embedding setup
5. Classifier-free guidance preparation
6. Diffusion sampling loop
Args:
images: List of reference PIL images
prompt: Text prompt (overridden by learned embeddings)
cached_condition: Dictionary containing:
- images_normal: Normal maps (PIL or tensor)
- images_position: Position maps (PIL or tensor)
Returns:
List[PIL.Image]: Generated multiview PBR textures
"""
self.prepare()
if images is None:
raise ValueError("Inputting embeddings not supported for this pipeline. Please pass an image.")
assert not isinstance(images, torch.Tensor)
if not isinstance(images, List):
images = [images]
images = [to_rgb_image(image) for image in images]
images_vae = [torch.tensor(np.array(image) / 255.0) for image in images]
images_vae = [image_vae.unsqueeze(0).permute(0, 3, 1, 2).unsqueeze(0) for image_vae in images_vae]
images_vae = torch.cat(images_vae, dim=1)
images_vae = images_vae.to(device=self.vae.device, dtype=self.unet.dtype)
batch_size = images_vae.shape[0]
N_ref = images_vae.shape[1]
assert batch_size == 1
assert num_images_per_prompt == 1
if self.unet.use_ra:
ref_latents = self.encode_images(images_vae)
cached_condition["ref_latents"] = ref_latents
def convert_pil_list_to_tensor(images):
bg_c = [1.0, 1.0, 1.0]
images_tensor = []
for batch_imgs in images:
view_imgs = []
for pil_img in batch_imgs:
img = numpy.asarray(pil_img, dtype=numpy.float32) / 255.0
if img.shape[2] > 3:
alpha = img[:, :, 3:]
img = img[:, :, :3] * alpha + bg_c * (1 - alpha)
                    img = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).contiguous().half().to(self._execution_device)
view_imgs.append(img)
view_imgs = torch.cat(view_imgs, dim=0)
images_tensor.append(view_imgs.unsqueeze(0))
images_tensor = torch.cat(images_tensor, dim=0)
return images_tensor
if "images_normal" in cached_condition:
if isinstance(cached_condition["images_normal"], List):
cached_condition["images_normal"] = convert_pil_list_to_tensor(cached_condition["images_normal"])
cached_condition["embeds_normal"] = self.encode_images(cached_condition["images_normal"])
if "images_position" in cached_condition:
if isinstance(cached_condition["images_position"], List):
cached_condition["images_position"] = convert_pil_list_to_tensor(cached_condition["images_position"])
cached_condition["position_maps"] = cached_condition["images_position"]
cached_condition["embeds_position"] = self.encode_images(cached_condition["images_position"])
if self.unet.use_learned_text_clip:
all_shading_tokens = []
for token in self.unet.pbr_setting:
all_shading_tokens.append(
getattr(self.unet, f"learned_text_clip_{token}").unsqueeze(dim=0).repeat(batch_size, 1, 1)
)
prompt_embeds = torch.stack(all_shading_tokens, dim=1)
negative_prompt_embeds = torch.stack(all_shading_tokens, dim=1)
# negative_prompt_embeds = torch.zeros_like(prompt_embeds)
else:
if prompt is None:
prompt = "high quality"
if isinstance(prompt, str):
prompt = [prompt for _ in range(batch_size)]
device = self._execution_device
prompt_embeds, _ = self.encode_prompt(
prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=False
)
if isinstance(negative_prompt, str):
negative_prompt = [negative_prompt for _ in range(batch_size)]
if negative_prompt is not None:
negative_prompt_embeds, _ = self.encode_prompt(
negative_prompt,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=False,
)
else:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
if guidance_scale > 1:
if self.unet.use_ra:
cached_condition["ref_latents"] = cached_condition["ref_latents"].repeat(
3, *([1] * (cached_condition["ref_latents"].dim() - 1))
)
cached_condition["ref_scale"] = torch.as_tensor([0.0, 1.0, 1.0]).to(cached_condition["ref_latents"])
if self.unet.use_dino:
zero_states = torch.zeros_like(cached_condition["dino_hidden_states"])
cached_condition["dino_hidden_states"] = torch.cat(
[zero_states, zero_states, cached_condition["dino_hidden_states"]]
)
del zero_states
if "embeds_normal" in cached_condition:
cached_condition["embeds_normal"] = cached_condition["embeds_normal"].repeat(
3, *([1] * (cached_condition["embeds_normal"].dim() - 1))
)
if "embeds_position" in cached_condition:
cached_condition["embeds_position"] = cached_condition["embeds_position"].repeat(
3, *([1] * (cached_condition["embeds_position"].dim() - 1))
)
if "position_maps" in cached_condition:
cached_condition["position_maps"] = cached_condition["position_maps"].repeat(
3, *([1] * (cached_condition["position_maps"].dim() - 1))
)
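        # Descriptive note on the block above: under classifier-free guidance every conditioning
        # tensor is tripled to match the [unconditional, reference-only, fully-conditioned] batch
        # that denoise() builds (prompt embeds are concatenated 3x and the UNet output is later
        # chunked into noise_pred_uncond / noise_pred_ref / noise_pred_full); ref_scale [0, 1, 1]
        # disables the reference branch for the unconditional pass.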
images = self.denoise(
None,
*args,
cross_attention_kwargs=None,
guidance_scale=guidance_scale,
num_images_per_prompt=num_images_per_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
num_inference_steps=num_inference_steps,
output_type=output_type,
width=width,
height=height,
return_dict=return_dict,
**cached_condition,
)
return images
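    # Minimal invocation sketch (illustrative; mirrors how the Lightning module calls this
    # pipeline during validation — the file name and tensors below are hypothetical):
    #   out = pipeline(
    #       [Image.open("ref_view.png")], prompt="high quality", num_inference_steps=30,
    #       height=512, width=512, output_type="latent",
    #       num_in_batch=6, N_pbrs=2,
    #       images_normal=[normal_tensor], images_position=[position_tensor],
    #   ).images  # latents; add dino_hidden_states=... when the UNet uses DINO features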
def denoise(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
timesteps: List[int] = None,
sigmas: List[float] = None,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[
Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
timesteps (`List[int]`, *optional*):
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
passed will be used. Must be in descending order.
sigmas (`List[float]`, *optional*):
Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
will be used.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
provided, embeddings are computed from the `ip_adapter_image` input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`]
(https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
using zero terminal SNR.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
                each denoising step during inference, with the following arguments: `callback_on_step_end(self:
DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
Core denoising procedure for multiview PBR texture generation.
Handles the complete diffusion process including:
- Input validation and preparation
- Timestep scheduling
- Latent noise initialization
- Iterative denoising with specialized guidance
- Output decoding and post-processing
Key innovations:
1. Triple-batch classifier-free guidance:
- Negative (unconditional)
- Reference-conditioned
- Full-conditioned
2. View-dependent guidance scaling:
- Adjusts influence based on camera azimuth
3. PBR-aware latent organization:
- Maintains material/view separation throughout
4. Optimized VRAM management:
- Selective tensor reshaping
Processing Stages:
1. Setup & Validation: Configures pipeline components and validates inputs
2. Prompt Encoding: Processes text/material conditioning
3. Latent Initialization: Prepares noise for denoising process
4. Iterative Denoising:
a) Scales and organizes latent variables
b) Predicts noise at current timestep
c) Applies view-dependent guidance
d) Computes previous latent state
5. Output Decoding: Converts latents to final images
6. Cleanup: Releases resources and formats output
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
# open cache
kwargs["cache"] = {}
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated,"
"consider using `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated,"
"consider using `callback_on_step_end`",
)
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# to deal with lora scaling and other possible forward hooks
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
ip_adapter_image,
ip_adapter_image_embeds,
callback_on_step_end_tensor_inputs,
)
self._guidance_scale = guidance_scale
self._guidance_rescale = guidance_rescale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._interrupt = False
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
lora_scale = self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
"""
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
self.do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
clip_skip=self.clip_skip,
        )
"""
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if self.do_classifier_free_guidance:
# prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, prompt_embeds])
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
image_embeds = self.prepare_ip_adapter_image_embeds(
ip_adapter_image,
ip_adapter_image_embeds,
device,
batch_size * num_images_per_prompt,
self.do_classifier_free_guidance,
)
# 4. Prepare timesteps
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler, num_inference_steps, device, timesteps, sigmas
)
assert num_images_per_prompt == 1
# 5. Prepare latent variables
n_pbr = len(self.unet.pbr_setting)
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * kwargs["num_in_batch"] * n_pbr, # num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 6.1 Add image embeds for IP-Adapter
added_cond_kwargs = (
{"image_embeds": image_embeds}
if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
else None
)
# 6.2 Optionally get Guidance Scale Embedding
timestep_cond = None
if self.unet.config.time_cond_proj_dim is not None:
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
timestep_cond = self.get_guidance_scale_embedding(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
if self.interrupt:
continue
# expand the latents if we are doing classifier free guidance
latents = rearrange(
latents, "(b n_pbr n) c h w -> b n_pbr n c h w", n=kwargs["num_in_batch"], n_pbr=n_pbr
)
# latent_model_input = torch.cat([latents] * 3) if self.do_classifier_free_guidance else latents
latent_model_input = latents.repeat(3, 1, 1, 1, 1, 1) if self.do_classifier_free_guidance else latents
latent_model_input = rearrange(latent_model_input, "b n_pbr n c h w -> (b n_pbr n) c h w")
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
latent_model_input = rearrange(
latent_model_input, "(b n_pbr n) c h w ->b n_pbr n c h w", n=kwargs["num_in_batch"], n_pbr=n_pbr
)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
timestep_cond=timestep_cond,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
**kwargs,
)[0]
latents = rearrange(latents, "b n_pbr n c h w -> (b n_pbr n) c h w")
# perform guidance
if self.do_classifier_free_guidance:
# noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
# noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred_uncond, noise_pred_ref, noise_pred_full = noise_pred.chunk(3)
if "camera_azims" in kwargs.keys():
camera_azims = kwargs["camera_azims"]
else:
camera_azims = [0] * kwargs["num_in_batch"]
def cam_mapping(azim):
if azim < 90 and azim >= 0:
return float(azim) / 90.0 + 1
elif azim >= 90 and azim < 330:
return 2.0
else:
return -float(azim) / 90.0 + 5.0
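                    # Illustrative weights from cam_mapping above: azim 0 (front) -> 1.0,
                    # 45 -> 1.5, 90..329 (side/back) -> 2.0, 330 -> ~1.33, 350 -> ~1.11, so
                    # guidance is strongest for views furthest from the reference-facing azimuth.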
view_scale_tensor = (
torch.from_numpy(np.asarray([cam_mapping(azim) for azim in camera_azims]))
.unsqueeze(0)
.repeat(n_pbr, 1)
.view(-1)
.to(noise_pred_uncond)[:, None, None, None]
)
noise_pred = noise_pred_uncond + self.guidance_scale * view_scale_tensor * (
noise_pred_ref - noise_pred_uncond
)
noise_pred += self.guidance_scale * view_scale_tensor * (noise_pred_full - noise_pred_ref)
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_ref, guidance_rescale=self.guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred, t, latents[:, :num_channels_latents, :, :], **extra_step_kwargs, return_dict=False
)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

13
hy3dpaint/src/__init__.py Executable file
View File

@@ -0,0 +1,13 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.

13
hy3dpaint/src/data/__init__.py Executable file
View File

@@ -0,0 +1,13 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.

View File

@@ -0,0 +1,219 @@
#!/usr/bin/env python3
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import os
import cv2
import json
import random
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image, ImageOps, ImageChops
class BaseDataset(Dataset):
def __init__(self, json_path, num_view=4, image_size=512):
self.data = list()
self.num_view = num_view
self.image_size = image_size
if isinstance(json_path, str):
json_path = [json_path]
for jp in json_path:
with open(jp) as f:
self.data.extend(json.load(f))
print("============= length of dataset %d =============" % len(self.data))
def __len__(self):
return len(self.data)
def load_image(self, pil_img, color, image_size=None):
if image_size is None:
image_size = self.image_size
        if isinstance(pil_img, str):
            pil_img = Image.open(pil_img)
if pil_img.mode == "L":
pil_img = pil_img.convert("RGB")
pil_img = pil_img.resize((image_size, image_size))
image = np.asarray(pil_img, dtype=np.float32) / 255.0
if image.shape[2] == 3:
image = image[:, :, :3]
alpha = np.ones_like(image)
else:
alpha = image[:, :, 3:]
image = image[:, :, :3] * alpha + color * (1 - alpha)
image = torch.from_numpy(image).permute(2, 0, 1).contiguous().float()
alpha = torch.from_numpy(alpha).permute(2, 0, 1).contiguous().float()
return image, alpha
def _apply_scaling(self, image, scale_factor, width, height, bg_color, scale_width=True):
"""Apply scaling to image with proper cropping or padding."""
if scale_width:
new_width = int(width * scale_factor)
new_height = height
else:
new_width = width
new_height = int(height * scale_factor)
image = image.resize((new_width, new_height), resample=Image.BILINEAR)
if scale_factor > 1.0:
# Crop to original size
left = (new_width - width) // 2
top = (new_height - height) // 2
image = image.crop((left, top, left + width, top + height))
else:
# Pad to original size
pad_width = (width - new_width) // 2
pad_height = (height - new_height) // 2
image = ImageOps.expand(
image,
(
pad_width,
pad_height,
width - new_width - pad_width,
height - new_height - pad_height,
),
fill=bg_color,
)
return image
def _apply_rotation(self, image, bg_color):
"""Apply random rotation to image."""
original_size = image.size
angle = random.uniform(-30, 30)
image = image.convert("RGBA")
rotated_image = image.rotate(angle, resample=Image.BILINEAR, expand=True)
# Create background with bg_color
background = Image.new("RGBA", rotated_image.size, (bg_color[0], bg_color[1], bg_color[2], 255))
background.paste(rotated_image, (0, 0), rotated_image)
image = background.convert("RGB")
# Crop to original size
left = (image.width - original_size[0]) // 2
top = (image.height - original_size[1]) // 2
right = left + original_size[0]
bottom = top + original_size[1]
return image.crop((left, top, right, bottom))
def _apply_translation(self, image, bg_color):
"""Apply random translation to image."""
max_dx = 0.1 * image.size[0]
max_dy = 0.1 * image.size[1]
dx = int(random.uniform(-max_dx, max_dx))
dy = int(random.uniform(-max_dy, max_dy))
image = ImageChops.offset(image, dx, dy)
# Fill edges
width, height = image.size
if dx > 0:
image.paste(bg_color, (0, 0, dx, height))
elif dx < 0:
image.paste(bg_color, (width + dx, 0, width, height))
if dy > 0:
image.paste(bg_color, (0, 0, width, dy))
elif dy < 0:
image.paste(bg_color, (0, height + dy, width, height))
return image
def _apply_perspective(self, image, bg_color):
"""Apply random perspective transformation to image."""
image_np = np.array(image)
height, width = image_np.shape[:2]
# Define original and new points
original_points = np.float32([[0, 0], [width, 0], [width, height], [0, height]])
perspective_scale = 0.2
new_points = np.float32(
[
[random.uniform(0, width * perspective_scale), random.uniform(0, height * perspective_scale)],
[random.uniform(width * (1 - perspective_scale), width), random.uniform(0, height * perspective_scale)],
[
random.uniform(width * (1 - perspective_scale), width),
random.uniform(height * (1 - perspective_scale), height),
],
[
random.uniform(0, width * perspective_scale),
random.uniform(height * (1 - perspective_scale), height),
],
]
)
matrix = cv2.getPerspectiveTransform(original_points, new_points)
image_np = cv2.warpPerspective(
image_np, matrix, (width, height), borderMode=cv2.BORDER_CONSTANT, borderValue=bg_color
)
return Image.fromarray(image_np)
def augment_image(
self,
image,
bg_color,
identity_prob=0.5,
rotate_prob=0.3,
scale_prob=0.5,
translate_prob=0.5,
perspective_prob=0.3,
):
if random.random() < identity_prob:
return image
# Convert torch tensors back to PIL images for augmentation
image = Image.fromarray((image.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8))
bg_color = (int(bg_color[0] * 255), int(bg_color[1] * 255), int(bg_color[2] * 255))
# Random rotation
if random.random() < rotate_prob:
image = self._apply_rotation(image, bg_color)
# Random scaling
if random.random() < scale_prob:
width, height = image.size
scale_factor = random.uniform(0.8, 1.2)
if random.random() < 0.5:
# Scale both dimensions proportionally
image = self._apply_scaling(image, scale_factor, width, height, bg_color, scale_width=True)
image = self._apply_scaling(image, scale_factor, width, height, bg_color, scale_width=False)
else:
# Scale width then height independently
scale_factor_w = random.uniform(0.8, 1.2)
scale_factor_h = random.uniform(0.8, 1.2)
image = self._apply_scaling(image, scale_factor_w, width, height, bg_color, scale_width=True)
image = self._apply_scaling(image, scale_factor_h, width, height, bg_color, scale_width=False)
# Random translation
if random.random() < translate_prob:
image = self._apply_translation(image, bg_color)
# Random perspective
if random.random() < perspective_prob:
image = self._apply_perspective(image, bg_color)
# Convert back to torch tensors
image = image.convert("RGB")
image = np.asarray(image, dtype=np.float32) / 255.0
image = torch.from_numpy(image).permute(2, 0, 1).contiguous().float()
return image
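    # Minimal usage sketch (illustrative only; the JSON path and image file are hypothetical):
    #   ds = BaseDataset("splits/train.json")
    #   img, alpha = ds.load_image("example_albedo.png", [0.5, 0.5, 0.5])  # (3, H, W) in [0, 1]
    #   img = ds.augment_image(img, [0.5, 0.5, 0.5])  # random rotation/scale/shift/perspective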

View File

@@ -0,0 +1,146 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import os
import time
import glob
import json
import random
import numpy as np
import torch
from .loader_util import BaseDataset
class TextureDataset(BaseDataset):
def __init__(
self, json_path, num_view=6, image_size=512, lighting_suffix_pool=["light_PL", "light_AL", "light_ENVMAP"]
):
self.data = list()
self.num_view = num_view
self.image_size = image_size
self.lighting_suffix_pool = lighting_suffix_pool
if isinstance(json_path, str):
json_path = [json_path]
for jp in json_path:
with open(jp) as f:
self.data.extend(json.load(f))
print("============= length of dataset %d =============" % len(self.data))
def __getitem__(self, index):
images_ref = list()
images_albedo = list()
images_mr = list()
images_normal = list()
images_position = list()
bg_white = [1.0, 1.0, 1.0]
bg_black = [0.0, 0.0, 0.0]
bg_gray = [127 / 255.0, 127 / 255.0, 127 / 255.0]
dirx = self.data[index]
condition_dict = {}
# 6view
fix_num_view = self.num_view
available_views = []
for ext in ["*_albedo.png", "*_albedo.jpg", "*_albedo.jpeg"]:
available_views.extend(glob.glob(os.path.join(dirx, "render_tex", ext)))
cond_images = (
glob.glob(os.path.join(dirx, "render_cond", "*.png"))
+ glob.glob(os.path.join(dirx, "render_cond", "*.jpg"))
+ glob.glob(os.path.join(dirx, "render_cond", "*.jpeg"))
)
        # Make sure there are enough views available
if len(available_views) < fix_num_view:
print(
f"Warning: Only {len(available_views)} views available, but {fix_num_view} requested."
"Using all available views."
)
images_gen = available_views
else:
images_gen = random.sample(available_views, fix_num_view)
if not cond_images:
raise ValueError(f"No condition images found in {os.path.join(dirx, 'render_cond')}")
ref_image_path = random.choice(cond_images)
light_suffix = None
for suffix in self.lighting_suffix_pool:
if suffix in ref_image_path:
light_suffix = suffix
break
if light_suffix is None:
raise ValueError(f"light suffix not found in {ref_image_path}")
ref_image_diff_light_path = random.choice(
[
ref_image_path.replace(light_suffix, tar_suffix)
for tar_suffix in self.lighting_suffix_pool
if tar_suffix != light_suffix
]
)
images_ref_paths = [ref_image_path, ref_image_diff_light_path]
# Data aug
bg_c_record = None
for i, image_ref in enumerate(images_ref_paths):
if random.random() < 0.6:
bg_c = bg_gray
else:
if random.random() < 0.5:
bg_c = bg_black
else:
bg_c = bg_white
if i == 0:
bg_c_record = bg_c
image, alpha = self.load_image(image_ref, bg_c_record)
image = self.augment_image(image, bg_c_record).float()
images_ref.append(image)
condition_dict["images_cond"] = torch.stack(images_ref, dim=0).float()
for i, image_gen in enumerate(images_gen):
images_albedo.append(self.augment_image(self.load_image(image_gen, bg_gray)[0], bg_gray))
images_mr.append(
self.augment_image(self.load_image(image_gen.replace("_albedo", "_mr"), bg_gray)[0], bg_gray)
)
images_normal.append(
self.augment_image(self.load_image(image_gen.replace("_albedo", "_normal"), bg_gray)[0], bg_gray)
)
images_position.append(
self.augment_image(self.load_image(image_gen.replace("_albedo", "_pos"), bg_gray)[0], bg_gray)
)
condition_dict["images_albedo"] = torch.stack(images_albedo, dim=0).float()
condition_dict["images_mr"] = torch.stack(images_mr, dim=0).float()
condition_dict["images_normal"] = torch.stack(images_normal, dim=0).float()
condition_dict["images_position"] = torch.stack(images_position, dim=0).float()
condition_dict["name"] = dirx # .replace('/', '_')
return condition_dict # (N, 3, H, W)
if __name__ == "__main__":
dataset = TextureDataset(json_path=["../../../train_examples/examples.json"])
print("images_cond", dataset[0]["images_cond"].shape)
print("images_albedo", dataset[0]["images_albedo"].shape)
print("images_mr", dataset[0]["images_mr"].shape)
print("images_normal", dataset[0]["images_normal"].shape)
print("images_position", dataset[0]["images_position"].shape)
print("name", dataset[0]["name"])

View File

@@ -0,0 +1,10 @@
+-----------------+----------------------------------+
| Key | Value |
+-----------------+----------------------------------+
| images_cond | torch.Size([2, 2, 3, 512, 512]) |
| images_albedo | torch.Size([2, 6, 3, 512, 512]) |
| images_mr | torch.Size([2, 6, 3, 512, 512]) |
| images_normal | torch.Size([2, 6, 3, 512, 512]) |
| images_position | torch.Size([2, 6, 3, 512, 512]) |
| caption | ['high quality', 'high quality'] |
+-----------------+----------------------------------+
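A minimal sketch (not part of the committed files, and assuming `batch` is the dict produced by the texture dataloader above) of how such a shape table can be printed:

def print_batch_shapes(batch):
    # batch: dict mapping keys like "images_cond" to tensors (or plain lists, e.g. "caption")
    width = max(len(k) for k in batch)
    for key, value in batch.items():
        shape = tuple(value.shape) if hasattr(value, "shape") else value
        print(f"| {key:<{width}} | {shape} |")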

View File

@@ -0,0 +1,79 @@
#!/usr/bin/env python3
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import pytorch_lightning as pl
from torch.utils.data import Dataset, ConcatDataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
class DataModuleFromConfig(pl.LightningDataModule):
def __init__(
self,
batch_size=8,
num_workers=4,
train=None,
validation=None,
test=None,
**kwargs,
):
super().__init__()
self.batch_size = batch_size
self.num_workers = num_workers
self.dataset_configs = dict()
if train is not None:
self.dataset_configs["train"] = train
if validation is not None:
self.dataset_configs["validation"] = validation
if test is not None:
self.dataset_configs["test"] = test
def setup(self, stage):
from src.utils.train_util import instantiate_from_config
if stage in ["fit"]:
dataset_dict = {}
for k in self.dataset_configs:
dataset_dict[k] = []
for loader in self.dataset_configs[k]:
dataset_dict[k].append(instantiate_from_config(loader))
self.datasets = dataset_dict
print(self.datasets)
else:
raise NotImplementedError
def train_dataloader(self):
datasets = ConcatDataset(self.datasets["train"])
sampler = DistributedSampler(datasets)
return DataLoader(
datasets,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=False,
sampler=sampler,
prefetch_factor=2,
pin_memory=True,
)
def val_dataloader(self):
datasets = ConcatDataset(self.datasets["validation"])
sampler = DistributedSampler(datasets)
return DataLoader(datasets, batch_size=4, num_workers=self.num_workers, shuffle=False, sampler=sampler)
def test_dataloader(self):
datasets = ConcatDataset(self.datasets["test"])
return DataLoader(datasets, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False)
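# Hedged usage sketch, not part of the original file. Every entry under train/validation/test is an
# instantiate_from_config-style dict; the target module path and json_path below are hypothetical.
if __name__ == "__main__":
    example_dm = DataModuleFromConfig(
        batch_size=2,
        num_workers=4,
        train=[{
            "target": "src.data.objaverse_loader_forTexturePBR.TextureDataset",  # hypothetical path
            "params": {"json_path": ["train_examples/examples.json"], "num_view": 6, "image_size": 512},
        }],
    )
    # example_dm.setup("fit") builds the datasets; train_dataloader() wraps them in a
    # DistributedSampler, so it is intended to run under DDP (torch.distributed initialised).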

13
hy3dpaint/src/utils/__init__.py Executable file
View File

@@ -0,0 +1,13 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the repsective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.

View File

@@ -0,0 +1,40 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import importlib
def count_params(model, verbose=False):
total_params = sum(p.numel() for p in model.parameters())
if verbose:
print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
return total_params
def instantiate_from_config(config):
if not "target" in config:
if config == "__is_first_stage__":
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
def get_obj_from_str(string, reload=False):
module, cls = string.rsplit(".", 1)
if reload:
module_imp = importlib.import_module(module)
importlib.reload(module_imp)
return getattr(importlib.import_module(module, package=None), cls)
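# Hedged example, not part of the original file: instantiate_from_config resolves the dotted
# "target" path via get_obj_from_str and forwards "params" as keyword arguments. The config
# below is purely illustrative.
if __name__ == "__main__":
    cfg = {"target": "collections.OrderedDict", "params": {"a": 1, "b": 2}}
    print(instantiate_from_config(cfg))  # -> an OrderedDict with a=1, b=2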

View File

@@ -0,0 +1,193 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import os
import torch
import copy
import trimesh
import numpy as np
from PIL import Image
from typing import List
from DifferentiableRenderer.MeshRender import MeshRender
from utils.simplify_mesh_utils import remesh_mesh
from utils.multiview_utils import multiviewDiffusionNet
from utils.pipeline_utils import ViewProcessor
from utils.image_super_utils import imageSuperNet
from utils.uvwrap_utils import mesh_uv_wrap
from DifferentiableRenderer.mesh_utils import convert_obj_to_glb
import warnings
warnings.filterwarnings("ignore")
from diffusers.utils import logging as diffusers_logging
diffusers_logging.set_verbosity(50)
class Hunyuan3DPaintConfig:
def __init__(self, max_num_view, resolution):
self.device = "cuda"
self.multiview_cfg_path = "hy3dpaint/cfgs/hunyuan-paint-pbr.yaml"
self.multiview_cfg_path = "cfgs/hunyuan-paint-pbr.yaml"
self.multiview_pretrained_path = "tencent/Hunyuan3D-2.1"
self.dino_ckpt_path = "facebook/dinov2-giant"
self.realesrgan_ckpt_path = "ckpt/RealESRGAN_x4plus.pth"
self.raster_mode = "cr"
self.bake_mode = "back_sample"
self.render_size = 1024 * 2
self.texture_size = 1024 * 4
self.max_selected_view_num = max_num_view
self.resolution = resolution
self.bake_exp = 4
self.merge_method = "fast"
# view selection
self.candidate_camera_azims = [0, 90, 180, 270, 0, 180]
self.candidate_camera_elevs = [0, 0, 0, 0, 90, -90]
self.candidate_view_weights = [1, 0.1, 0.5, 0.1, 0.05, 0.05]
for azim in range(0, 360, 30):
self.candidate_camera_azims.append(azim)
self.candidate_camera_elevs.append(20)
self.candidate_view_weights.append(0.01)
self.candidate_camera_azims.append(azim)
self.candidate_camera_elevs.append(-20)
self.candidate_view_weights.append(0.01)
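        # Added note (not in the original source): together with the 6 axis-aligned views above,
        # the loop contributes 12 azimuths x 2 elevations (+/-20 deg), i.e. 30 candidate views in
        # total, from which bake_view_selection keeps at most max_selected_view_num.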
class Hunyuan3DPaintPipeline:
def __init__(self, config=None) -> None:
self.config = config if config is not None else Hunyuan3DPaintConfig()
self.models = {}
self.stats_logs = {}
self.render = MeshRender(
default_resolution=self.config.render_size,
texture_size=self.config.texture_size,
bake_mode=self.config.bake_mode,
raster_mode=self.config.raster_mode,
)
self.view_processor = ViewProcessor(self.config, self.render)
self.load_models()
def load_models(self):
torch.cuda.empty_cache()
self.models["super_model"] = imageSuperNet(self.config)
self.models["multiview_model"] = multiviewDiffusionNet(self.config)
print("Models Loaded.")
@torch.no_grad()
def __call__(self, mesh_path=None, image_path=None, output_mesh_path=None, use_remesh=True, save_glb=True):
"""Generate texture for 3D mesh using multiview diffusion"""
# Ensure image_prompt is a list
if isinstance(image_path, str):
image_prompt = Image.open(image_path)
elif isinstance(image_path, Image.Image):
image_prompt = image_path
if not isinstance(image_prompt, List):
image_prompt = [image_prompt]
else:
image_prompt = image_path
# Process mesh
path = os.path.dirname(mesh_path)
if use_remesh:
processed_mesh_path = os.path.join(path, "white_mesh_remesh.obj")
remesh_mesh(mesh_path, processed_mesh_path)
else:
processed_mesh_path = mesh_path
# Output path
if output_mesh_path is None:
            output_mesh_path = os.path.join(path, "textured_mesh.obj")
# Load mesh
mesh = trimesh.load(processed_mesh_path)
mesh = mesh_uv_wrap(mesh)
self.render.load_mesh(mesh=mesh)
########### View Selection #########
selected_camera_elevs, selected_camera_azims, selected_view_weights = self.view_processor.bake_view_selection(
self.config.candidate_camera_elevs,
self.config.candidate_camera_azims,
self.config.candidate_view_weights,
self.config.max_selected_view_num,
)
normal_maps = self.view_processor.render_normal_multiview(
selected_camera_elevs, selected_camera_azims, use_abs_coor=True
)
position_maps = self.view_processor.render_position_multiview(selected_camera_elevs, selected_camera_azims)
########## Style ###########
image_caption = "high quality"
image_style = []
for image in image_prompt:
image = image.resize((512, 512))
if image.mode == "RGBA":
white_bg = Image.new("RGB", image.size, (255, 255, 255))
white_bg.paste(image, mask=image.getchannel("A"))
image = white_bg
image_style.append(image)
image_style = [image.convert("RGB") for image in image_style]
########### Multiview ##########
multiviews_pbr = self.models["multiview_model"](
image_style,
normal_maps + position_maps,
prompt=image_caption,
custom_view_size=self.config.resolution,
resize_input=True,
)
########### Enhance ##########
enhance_images = {}
enhance_images["albedo"] = copy.deepcopy(multiviews_pbr["albedo"])
enhance_images["mr"] = copy.deepcopy(multiviews_pbr["mr"])
for i in range(len(enhance_images["albedo"])):
enhance_images["albedo"][i] = self.models["super_model"](enhance_images["albedo"][i])
enhance_images["mr"][i] = self.models["super_model"](enhance_images["mr"][i])
########### Bake ##########
        for i in range(len(enhance_images["albedo"])):
enhance_images["albedo"][i] = enhance_images["albedo"][i].resize(
(self.config.render_size, self.config.render_size)
)
enhance_images["mr"][i] = enhance_images["mr"][i].resize((self.config.render_size, self.config.render_size))
texture, mask = self.view_processor.bake_from_multiview(
enhance_images["albedo"], selected_camera_elevs, selected_camera_azims, selected_view_weights
)
mask_np = (mask.squeeze(-1).cpu().numpy() * 255).astype(np.uint8)
texture_mr, mask_mr = self.view_processor.bake_from_multiview(
enhance_images["mr"], selected_camera_elevs, selected_camera_azims, selected_view_weights
)
mask_mr_np = (mask_mr.squeeze(-1).cpu().numpy() * 255).astype(np.uint8)
########## inpaint ###########
texture = self.view_processor.texture_inpaint(texture, mask_np)
self.render.set_texture(texture, force_set=True)
if "mr" in enhance_images:
texture_mr = self.view_processor.texture_inpaint(texture_mr, mask_mr_np)
self.render.set_texture_mr(texture_mr)
self.render.save_mesh(output_mesh_path, downsample=True)
if save_glb:
convert_obj_to_glb(output_mesh_path, output_mesh_path.replace(".obj", ".glb"))
output_glb_path = output_mesh_path.replace(".obj", ".glb")
return output_mesh_path
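# Hedged usage sketch, not part of the original file; the mesh and reference-image paths are hypothetical.
if __name__ == "__main__":
    conf = Hunyuan3DPaintConfig(max_num_view=6, resolution=512)
    paint_pipeline = Hunyuan3DPaintPipeline(conf)
    out_path = paint_pipeline(mesh_path="demo/white_mesh.obj", image_path="demo/reference.png")
    print(out_path)  # path of the textured .obj; a .glb is written alongside when save_glb=True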

401
hy3dpaint/train.py Normal file
View File

@@ -0,0 +1,401 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import torch
import os, sys
import argparse
import shutil
import subprocess
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.strategies import DDPStrategy
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities import rank_zero_only, rank_zero_warn
from src.utils.train_util import instantiate_from_config
import warnings
warnings.filterwarnings("ignore")
from diffusers.utils import logging as diffusers_logging
diffusers_logging.set_verbosity(50)
@rank_zero_only
def rank_zero_print(*args):
print(*args)
def get_parser(**parser_kwargs):
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
parser = argparse.ArgumentParser(**parser_kwargs)
parser.add_argument(
"-r",
"--resume",
type=str,
default=None,
help="resume from checkpoint",
)
parser.add_argument(
"--resume_weights_only",
action="store_true",
help="only resume model weights",
)
parser.add_argument(
"-b",
"--base",
type=str,
default="base_config.yaml",
help="path to base configs",
)
parser.add_argument(
"-n",
"--name",
type=str,
default="",
help="experiment name",
)
parser.add_argument(
"--num_nodes",
type=int,
default=1,
help="number of nodes to use",
)
parser.add_argument(
"--gpus",
type=str,
default="0,",
help="gpu ids to use",
)
parser.add_argument(
"-s",
"--seed",
type=int,
default=42,
help="seed for seed_everything",
)
parser.add_argument(
"-l",
"--logdir",
type=str,
default="logs",
help="directory for logging data",
)
return parser
class SetupCallback(Callback):
def __init__(self, resume, logdir, ckptdir, cfgdir, config):
super().__init__()
self.resume = resume
self.logdir = logdir
self.ckptdir = ckptdir
self.cfgdir = cfgdir
self.config = config
def on_fit_start(self, trainer, pl_module):
if trainer.global_rank == 0:
# Create logdirs and save configs
os.makedirs(self.logdir, exist_ok=True)
os.makedirs(self.ckptdir, exist_ok=True)
os.makedirs(self.cfgdir, exist_ok=True)
rank_zero_print("Project config")
rank_zero_print(OmegaConf.to_yaml(self.config))
OmegaConf.save(self.config, os.path.join(self.cfgdir, "project.yaml"))
class CodeSnapshot(Callback):
"""
Modified from https://github.com/threestudio-project/threestudio/blob/main/threestudio/utils/callbacks.py#L60
"""
def __init__(self, savedir):
self.savedir = savedir
def get_file_list(self):
return [
b.decode()
for b in set(subprocess.check_output('git ls-files -- ":!:configs/*"', shell=True).splitlines())
| set( # hard code, TODO: use config to exclude folders or files
subprocess.check_output("git ls-files --others --exclude-standard", shell=True).splitlines()
)
]
@rank_zero_only
def save_code_snapshot(self):
os.makedirs(self.savedir, exist_ok=True)
# for f in self.get_file_list():
# if not os.path.exists(f) or os.path.isdir(f):
# continue
# os.makedirs(os.path.join(self.savedir, os.path.dirname(f)), exist_ok=True)
# shutil.copyfile(f, os.path.join(self.savedir, f))
def on_fit_start(self, trainer, pl_module):
try:
self.save_code_snapshot()
        except Exception:
rank_zero_warn(
"Code snapshot is not saved. Please make sure you have git installed and are in a git repository."
)
if __name__ == "__main__":
# add cwd for convenience and to make classes in this file available when
# running as `python main.py`
sys.path.append(os.getcwd())
torch.set_float32_matmul_precision("medium")
parser = get_parser()
opt, unknown = parser.parse_known_args()
cfg_fname = os.path.split(opt.base)[-1]
cfg_name = os.path.splitext(cfg_fname)[0]
exp_name = "-" + opt.name if opt.name != "" else ""
logdir = os.path.join(opt.logdir, cfg_name + exp_name)
# assert not os.path.exists(logdir) or 'test' in logdir, logdir
if os.path.exists(logdir) and opt.resume is None:
auto_resume_path = os.path.join(logdir, "checkpoints", "last.ckpt")
if os.path.exists(auto_resume_path):
opt.resume = auto_resume_path
print(f"Auto set resume ckpt {opt.resume}")
ckptdir = os.path.join(logdir, "checkpoints")
cfgdir = os.path.join(logdir, "configs")
codedir = os.path.join(logdir, "code")
    node_rank = int(os.environ.get("NODE_RANK", 0))  # index of the current node
    local_rank = int(os.environ.get("LOCAL_RANK", 0))  # GPU index within the current node
    num_gpus_per_node = torch.cuda.device_count()  # number of GPUs on each node
global_rank = node_rank * num_gpus_per_node + local_rank
seed_everything(opt.seed + global_rank)
# init configs
config = OmegaConf.load(opt.base)
lightning_config = config.lightning
trainer_config = lightning_config.trainer
trainer_config["accelerator"] = "gpu"
rank_zero_print(f"Running on GPUs {opt.gpus}")
try:
ngpu = int(opt.gpus)
    except ValueError:
ngpu = len(opt.gpus.strip(",").split(","))
trainer_config["devices"] = ngpu
trainer_opt = argparse.Namespace(**trainer_config)
lightning_config.trainer = trainer_config
# model
model = instantiate_from_config(config.model)
model_unet = model.unet.unet
model_unet_prefix = "unet.unet."
if hasattr(model_unet, "unet"):
model_unet = model_unet.unet
model_unet_prefix += "unet."
if getattr(config, "init_unet_from", None):
unet_ckpt_path = config.init_unet_from
sd = torch.load(unet_ckpt_path, map_location="cpu")
model_unet.load_state_dict(sd, strict=True)
if getattr(config, "init_vae_from", None):
vae_ckpt_path = config.init_vae_from
sd_vae = torch.load(vae_ckpt_path, map_location="cpu")
def replace_key(key_str):
replace_pairs = [("key", "to_k"), ("query", "to_q"), ("value", "to_v"), ("proj_attn", "to_out.0")]
for replace_pair in replace_pairs:
key_str = key_str.replace(replace_pair[0], replace_pair[1])
return key_str
sd_vae = {replace_key(k): v for k, v in sd_vae.items()}
model.pipeline.vae.load_state_dict(sd_vae, strict=True)
if hasattr(model.unet, "controlnet"):
if getattr(config, "init_control_from", None):
unet_ckpt_path = config.init_control_from
sd_control = torch.load(unet_ckpt_path, map_location="cpu")
model.unet.controlnet.load(sd_control, strict=True)
noise_in_channels = config.model.params.get("noise_in_channels", None)
if noise_in_channels is not None:
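        # Added note (not in the original source): when the configured noise input has more channels
        # than the pretrained conv_in, a wider conv layer is created, the pretrained weights and bias
        # are copied into the leading slots and the extra input channels are zero-initialised, so the
        # new inputs contribute nothing at the start of training.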
with torch.no_grad():
new_conv_in = torch.nn.Conv2d(
noise_in_channels,
model_unet.conv_in.out_channels,
model_unet.conv_in.kernel_size,
model_unet.conv_in.stride,
model_unet.conv_in.padding,
)
new_conv_in.weight.zero_()
new_conv_in.weight[:, : model_unet.conv_in.in_channels, :, :].copy_(model_unet.conv_in.weight)
new_conv_in.bias.zero_()
new_conv_in.bias[: model_unet.conv_in.bias.size(0)].copy_(model_unet.conv_in.bias)
model_unet.conv_in = new_conv_in
if hasattr(model.unet, "controlnet"):
if config.model.params.get("control_in_channels", None):
control_in_channels = config.model.params.control_in_channels
model.unet.controlnet.config["conditioning_channels"] = control_in_channels
condition_conv_in = model.unet.controlnet.controlnet_cond_embedding.conv_in
new_condition_conv_in = torch.nn.Conv2d(
control_in_channels,
condition_conv_in.out_channels,
kernel_size=condition_conv_in.kernel_size,
stride=condition_conv_in.stride,
padding=condition_conv_in.padding,
)
with torch.no_grad():
new_condition_conv_in.weight[:, : condition_conv_in.in_channels, :, :] = condition_conv_in.weight
if condition_conv_in.bias is not None:
new_condition_conv_in.bias = condition_conv_in.bias
model.unet.controlnet.controlnet_cond_embedding.conv_in = new_condition_conv_in
rank_zero_print(f"Loaded Init ...")
if getattr(config, "resume_from", None):
cnet_ckpt_path = config.resume_from
sds = torch.load(cnet_ckpt_path, map_location="cpu")["state_dict"]
sd0 = {k[len(model_unet_prefix) :]: v for k, v in sds.items() if model_unet_prefix in k}
# model.unet.unet.unet.load_state_dict(sd0, strict=True)
model_unet.load_state_dict(sd0, strict=True)
if hasattr(model.unet, "controlnet"):
sd1 = {k[16:]: v for k, v in sds.items() if "unet.controlnet." in k}
model.unet.controlnet.load_state_dict(sd1, strict=True)
rank_zero_print(f"Loaded {cnet_ckpt_path} ...")
if opt.resume and opt.resume_weights_only:
model = model.__class__.load_from_checkpoint(opt.resume, **config.model.params)
model.logdir = logdir
# trainer and callbacks
trainer_kwargs = dict()
# logger
default_logger_cfg = {
"target": "pytorch_lightning.loggers.TensorBoardLogger",
"params": {
"name": "tensorboard",
"save_dir": logdir,
"version": "0",
},
}
logger_cfg = OmegaConf.merge(default_logger_cfg)
trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
# model checkpoint
default_modelckpt_cfg = {
"target": "pytorch_lightning.callbacks.ModelCheckpoint",
"params": {
"dirpath": ckptdir,
"filename": "{step:08}",
"verbose": True,
"save_last": True,
"every_n_train_steps": 5000,
"save_top_k": -1, # save all checkpoints
},
}
if "modelcheckpoint" in lightning_config:
modelckpt_cfg = lightning_config.modelcheckpoint
else:
modelckpt_cfg = OmegaConf.create()
modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
# callbacks
default_callbacks_cfg = {
"setup_callback": {
"target": "train.SetupCallback",
"params": {
"resume": opt.resume,
"logdir": logdir,
"ckptdir": ckptdir,
"cfgdir": cfgdir,
"config": config,
},
},
"learning_rate_logger": {
"target": "pytorch_lightning.callbacks.LearningRateMonitor",
"params": {
"logging_interval": "step",
},
},
"code_snapshot": {
"target": "train.CodeSnapshot",
"params": {
"savedir": codedir,
},
},
}
default_callbacks_cfg["checkpoint_callback"] = modelckpt_cfg
if "callbacks" in lightning_config:
callbacks_cfg = lightning_config.callbacks
else:
callbacks_cfg = OmegaConf.create()
callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]
trainer_kwargs["precision"] = "bf16"
trainer_kwargs["strategy"] = DDPStrategy(find_unused_parameters=False)
# trainer
trainer = Trainer(**trainer_config, **trainer_kwargs, num_nodes=opt.num_nodes, inference_mode=False)
trainer.logdir = logdir
# data
data = instantiate_from_config(config.data)
data.prepare_data()
data.setup("fit")
# configure learning rate
base_lr = config.model.base_learning_rate
if "accumulate_grad_batches" in lightning_config.trainer:
accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches
else:
accumulate_grad_batches = 1
rank_zero_print(f"accumulate_grad_batches = {accumulate_grad_batches}")
lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
model.learning_rate = base_lr
rank_zero_print("++++ NOT USING LR SCALING ++++")
rank_zero_print(f"Setting learning rate to {model.learning_rate:.2e}")
# run training loop
if opt.resume and not opt.resume_weights_only:
trainer.fit(model, data, ckpt_path=opt.resume)
else:
trainer.fit(model, data)
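# Hedged example invocation (paths and GPU ids are hypothetical, not taken from the repository docs):
#
#     python train.py --base cfgs/hunyuan-paint-pbr.yaml --gpus 0,1,2,3 -n pbr_run --logdir logs
#
# If logs/<config-name>-<name>/checkpoints/last.ckpt already exists the script resumes from it
# automatically; passing --resume_weights_only reloads only the model weights rather than the
# full trainer state.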

Binary image files not shown (preview thumbnails, roughly 12-18 KiB each).

Some files were not shown because too many files have changed in this diff.