This commit is contained in:
Huiwenshi
2025-06-13 23:53:14 +08:00
parent 70ee89e0a2
commit c88bee648e
581 changed files with 30365 additions and 1 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,107 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import math
import numpy as np
import torch
def transform_pos(mtx, pos, keepdim=False):
    """Apply a 4x4 transform to positions, homogenizing 3-component inputs.

    Args:
        mtx: 4x4 transform, numpy array (moved to pos's device) or torch tensor.
        pos: (N, 3) or (N, 4) positions.
        keepdim: when False, a leading batch dimension is prepended.
    """
    if isinstance(mtx, np.ndarray):
        t_mtx = torch.from_numpy(mtx).to(pos.device)
    else:
        t_mtx = mtx
    # Append a w=1 column when positions are given as (N, 3).
    if pos.shape[-1] == 3:
        ones = torch.ones([pos.shape[0], 1]).to(pos.device)
        homogeneous = torch.cat([pos, ones], axis=1)
    else:
        homogeneous = pos
    transformed = torch.matmul(homogeneous, t_mtx.t())
    # keepdim=False prepends a batch dimension, matching the original API.
    return transformed if keepdim else transformed[None, ...]
def get_mv_matrix(elev, azim, camera_distance, center=None):
    """Build a 4x4 world-to-camera (model-view) matrix for an orbit camera.

    Args:
        elev: elevation angle in degrees.
        azim: azimuth angle in degrees.
        camera_distance: distance from the camera to the look-at point.
        center: look-at point (defaults to the origin).

    Returns:
        numpy.ndarray: float32 world-to-camera matrix, shape (4, 4).
    """
    # Sign flip / 90-degree offset match this module's orbit convention.
    elev_rad = math.radians(-elev)
    azim_rad = math.radians(azim + 90)

    cam_pos = np.array(
        [
            camera_distance * math.cos(elev_rad) * math.cos(azim_rad),
            camera_distance * math.cos(elev_rad) * math.sin(azim_rad),
            camera_distance * math.sin(elev_rad),
        ]
    )

    center = np.array([0, 0, 0]) if center is None else np.array(center)

    # Orthonormal camera basis: forward (look direction), right, up; +Z world up.
    forward = center - cam_pos
    forward = forward / np.linalg.norm(forward)
    right = np.cross(forward, np.array([0, 0, 1.0]))
    right = right / np.linalg.norm(right)
    up = np.cross(right, forward)
    up = up / np.linalg.norm(up)

    # Camera-to-world: basis vectors as columns plus the translation column.
    c2w = np.concatenate([np.stack([right, up, -forward], axis=-1), cam_pos[:, None]], axis=-1)

    # Invert the rigid transform to obtain world-to-camera.
    w2c = np.zeros((4, 4))
    rot_t = c2w[:3, :3].T
    w2c[:3, :3] = rot_t
    w2c[:3, 3:] = -rot_t @ c2w[:3, 3:]
    w2c[3, 3] = 1.0
    return w2c.astype(np.float32)
def get_orthographic_projection_matrix(left=-1, right=1, bottom=-1, top=1, near=0, far=2):
    """Return a 4x4 OpenGL-style orthographic projection matrix.

    Args:
        left, right: x-extent of the view volume.
        bottom, top: y-extent of the view volume.
        near, far: clip-plane distances.

    Returns:
        numpy.ndarray: float32 orthographic projection matrix, shape (4, 4).
    """
    proj = np.eye(4, dtype=np.float32)
    # Scale each axis of the view volume into [-1, 1]...
    proj[0, 0] = 2 / (right - left)
    proj[1, 1] = 2 / (top - bottom)
    proj[2, 2] = -2 / (far - near)
    # ...and translate the volume's center to the origin.
    proj[0, 3] = -(right + left) / (right - left)
    proj[1, 3] = -(top + bottom) / (top - bottom)
    proj[2, 3] = -(far + near) / (far - near)
    return proj
def get_perspective_projection_matrix(fovy, aspect_wh, near, far):
    """Return a 4x4 OpenGL-style perspective projection matrix.

    Args:
        fovy: vertical field of view in degrees.
        aspect_wh: width / height aspect ratio.
        near, far: positive clip-plane distances.

    Returns:
        numpy.ndarray: float32 perspective projection matrix, shape (4, 4).
    """
    half_fov = math.radians(fovy) / 2.0
    depth_range = far - near
    return np.array(
        [
            [1.0 / (math.tan(half_fov) * aspect_wh), 0, 0, 0],
            [0, 1.0 / math.tan(half_fov), 0, 0],
            # Map depth into clip space; w receives -z (perspective divide).
            [0, 0, -(far + near) / depth_range, -2.0 * far * near / depth_range],
            [0, 0, -1, 0],
        ]
    ).astype(np.float32)

View File

@@ -0,0 +1 @@
# Build the pybind11 extension from mesh_inpaint_processor.cpp.
# -O3: optimize; -shared -fPIC: produce a loadable shared object;
# `python -m pybind11 --includes` injects the pybind11/Python headers;
# `python3-config --extension-suffix` appends the platform ABI suffix
# (e.g. ".cpython-310-x86_64-linux-gnu.so") so Python can import it.
c++ -O3 -Wall -shared -std=c++11 -fPIC `python -m pybind11 --includes` mesh_inpaint_processor.cpp -o mesh_inpaint_processor`python3-config --extension-suffix`

View File

@@ -0,0 +1,395 @@
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <algorithm>
#include <cmath>
#include <queue>
#include <vector>
#include <functional>
namespace py = pybind11;
using namespace std;
namespace {
// Internal data holder: requests all pybind11 buffers once and caches the
// raw pointers, avoiding repeated buffer lookups throughout the module.
struct MeshData {
    int texture_height, texture_width, texture_channel;  // texture shape (H, W, C)
    int vtx_num;            // number of position vertices
    float* texture_ptr;     // H*W*C texel values
    uint8_t* mask_ptr;      // H*W validity mask; >0 marks a valid texel
    float* vtx_pos_ptr;     // vtx_num*3 vertex positions
    float* vtx_uv_ptr;      // per-UV-vertex (u, v) coordinates
    int* pos_idx_ptr;       // faces*3 position indices
    int* uv_idx_ptr;        // faces*3 UV indices
    // Keep the buffer_info objects alive so the cached raw pointers stay
    // valid for the lifetime of this struct.
    py::buffer_info texture_buf, mask_buf, vtx_pos_buf, vtx_uv_buf, pos_idx_buf, uv_idx_buf;
    MeshData(py::array_t<float>& texture, py::array_t<uint8_t>& mask,
             py::array_t<float>& vtx_pos, py::array_t<float>& vtx_uv,
             py::array_t<int>& pos_idx, py::array_t<int>& uv_idx) {
        texture_buf = texture.request();
        mask_buf = mask.request();
        vtx_pos_buf = vtx_pos.request();
        vtx_uv_buf = vtx_uv.request();
        pos_idx_buf = pos_idx.request();
        uv_idx_buf = uv_idx.request();
        texture_height = texture_buf.shape[0];
        texture_width = texture_buf.shape[1];
        texture_channel = texture_buf.shape[2];
        texture_ptr = static_cast<float*>(texture_buf.ptr);
        mask_ptr = static_cast<uint8_t*>(mask_buf.ptr);
        vtx_num = vtx_pos_buf.shape[0];
        vtx_pos_ptr = static_cast<float*>(vtx_pos_buf.ptr);
        vtx_uv_ptr = static_cast<float*>(vtx_uv_buf.ptr);
        pos_idx_ptr = static_cast<int*>(pos_idx_buf.ptr);
        uv_idx_ptr = static_cast<int*>(uv_idx_buf.ptr);
    }
};
// Map a UV vertex index to integer (row, col) texel coordinates.
// The v coordinate is flipped (1 - v) to match image row order.
pair<int, int> calculateUVCoordinates(int vtx_uv_idx, const MeshData& data) {
    const float u = data.vtx_uv_ptr[vtx_uv_idx * 2];
    const float v = data.vtx_uv_ptr[vtx_uv_idx * 2 + 1];
    const int col = round(u * (data.texture_width - 1));
    const int row = round((1.0 - v) * (data.texture_height - 1));
    return make_pair(row, col);
}
// Inverse-square distance weight between two points. The distance is
// clamped below at 1e-4 so near-coincident vertices cannot blow up the
// weight.
float calculateDistanceWeight(const array<float, 3>& vtx_0, const array<float, 3>& vtx1) {
    const float inv_dist = 1.0f / max(
        sqrt(
            pow(vtx_0[0] - vtx1[0], 2) +
            pow(vtx_0[1] - vtx1[1], 2) +
            pow(vtx_0[2] - vtx1[2], 2)
        ), 1E-4);
    return inv_dist * inv_dist;
}
// Fetch the position of vertex vtx_idx as a stack array {x, y, z}.
array<float, 3> getVertexPosition(int vtx_idx, const MeshData& data) {
    const float* p = data.vtx_pos_ptr + vtx_idx * 3;
    return {p[0], p[1], p[2]};
}
// Build a directed adjacency list over position vertices: for every face,
// add the edge corner k -> corner (k+1)%3. On meshes where each edge is
// shared by two faces the opposite face contributes the reverse edge, so
// the graph behaves as undirected there.
void buildGraph(vector<vector<int>>& G, const MeshData& data) {
    G.resize(data.vtx_num);
    // NOTE(review): the loop bound uses uv_idx_buf's face count while the
    // body indexes pos_idx_ptr — this assumes both index buffers describe
    // the same number of faces; confirm against the Python callers.
    for(int i = 0; i < data.uv_idx_buf.shape[0]; ++i) {
        for(int k = 0; k < 3; ++k) {
            G[data.pos_idx_ptr[i * 3 + k]].push_back(data.pos_idx_ptr[i * 3 + (k + 1) % 3]);
        }
    }
}
// Generic initialization shared by the float- and int-mask pipelines.
// For every face corner whose texel is valid in the mask, copy the texel
// color onto the corresponding position-vertex and mark it with mask_value;
// optionally collect the indices of vertices that remain uncolored.
template<typename MaskType>
void initializeVertexDataGeneric(const MeshData& data, vector<MaskType>& vtx_mask,
                                 vector<vector<float>>& vtx_color, vector<int>* uncolored_vtxs = nullptr,
                                 MaskType mask_value = static_cast<MaskType>(1)) {
    vtx_mask.assign(data.vtx_num, static_cast<MaskType>(0));
    vtx_color.assign(data.vtx_num, vector<float>(data.texture_channel, 0.0f));
    if(uncolored_vtxs) {
        uncolored_vtxs->clear();
    }
    // Iterate every corner of every face.
    for(int i = 0; i < data.uv_idx_buf.shape[0]; ++i) {
        for(int k = 0; k < 3; ++k) {
            int vtx_uv_idx = data.uv_idx_ptr[i * 3 + k];
            int vtx_idx = data.pos_idx_ptr[i * 3 + k];
            // uv_coords is (row, col) into the H x W mask/texture.
            auto uv_coords = calculateUVCoordinates(vtx_uv_idx, data);
            if(data.mask_ptr[uv_coords.first * data.texture_width + uv_coords.second] > 0) {
                vtx_mask[vtx_idx] = mask_value;
                for(int c = 0; c < data.texture_channel; ++c) {
                    vtx_color[vtx_idx][c] = data.texture_ptr[(uv_coords.first * data.texture_width +
                                                             uv_coords.second) * data.texture_channel + c];
                }
            } else if(uncolored_vtxs) {
                // A vertex can be pushed multiple times (once per uncolored
                // face corner it participates in).
                uncolored_vtxs->push_back(vtx_idx);
            }
        }
    }
}
// Generic diffusion smoothing shared by the float- and int-mask variants.
// Each pass assigns every still-uncolored vertex the inverse-square-distance
// weighted average of its colored neighbors (is_colored_func decides,
// set_colored_func marks newly colored vertices). Termination: smooth_count
// starts at 2, is decremented on passes where the uncolored count stops
// changing and incremented while progress is still being made, so the loop
// ends a couple of passes after the uncolored set stabilizes.
template<typename MaskType>
void performSmoothingAlgorithm(const MeshData& data, const vector<vector<int>>& G,
                               vector<MaskType>& vtx_mask, vector<vector<float>>& vtx_color,
                               const vector<int>& uncolored_vtxs,
                               function<bool(MaskType)> is_colored_func,
                               function<void(MaskType&)> set_colored_func) {
    int smooth_count = 2;
    int last_uncolored_vtx_count = 0;
    while(smooth_count > 0) {
        int uncolored_vtx_count = 0;
        for(int vtx_idx : uncolored_vtxs) {
            vector<float> sum_color(data.texture_channel, 0.0f);
            float total_weight = 0.0f;
            array<float, 3> vtx_0 = getVertexPosition(vtx_idx, data);
            // Accumulate weighted colors from colored neighbors only.
            for(int connected_idx : G[vtx_idx]) {
                if(is_colored_func(vtx_mask[connected_idx])) {
                    array<float, 3> vtx1 = getVertexPosition(connected_idx, data);
                    float dist_weight = calculateDistanceWeight(vtx_0, vtx1);
                    for(int c = 0; c < data.texture_channel; ++c) {
                        sum_color[c] += vtx_color[connected_idx][c] * dist_weight;
                    }
                    total_weight += dist_weight;
                }
            }
            if(total_weight > 0.0f) {
                // At least one colored neighbor: take the weighted average.
                for(int c = 0; c < data.texture_channel; ++c) {
                    vtx_color[vtx_idx][c] = sum_color[c] / total_weight;
                }
                set_colored_func(vtx_mask[vtx_idx]);
            } else {
                uncolored_vtx_count++;
            }
        }
        // Stagnation check drives the remaining-pass budget.
        if(last_uncolored_vtx_count == uncolored_vtx_count) {
            smooth_count--;
        } else {
            smooth_count++;
        }
        last_uncolored_vtx_count = uncolored_vtx_count;
    }
}
// Wave-front color propagation from the seed vertices in active_vtxs.
// Mask encoding during a wave:
//   > 0  : final color assigned (seed, or finalized in an earlier wave)
//   == 0 : not yet reached
//   < 0  : reached in the current wave; the magnitude is the accumulated
//          edge weight while vtx_color holds the unnormalized weighted sum.
void performForwardPropagation(const MeshData& data, const vector<vector<int>>& G,
                               vector<float>& vtx_mask, vector<vector<float>>& vtx_color,
                               queue<int>& active_vtxs) {
    while(!active_vtxs.empty()) {
        queue<int> pending_active_vtxs;
        while(!active_vtxs.empty()) {
            int vtx_idx = active_vtxs.front();
            active_vtxs.pop();
            array<float, 3> vtx_0 = getVertexPosition(vtx_idx, data);
            for(int connected_idx : G[vtx_idx]) {
                // Skip neighbors that already have a final color.
                if(vtx_mask[connected_idx] > 0) continue;
                array<float, 3> vtx1 = getVertexPosition(connected_idx, data);
                float dist_weight = calculateDistanceWeight(vtx_0, vtx1);
                for(int c = 0; c < data.texture_channel; ++c) {
                    vtx_color[connected_idx][c] += vtx_color[vtx_idx][c] * dist_weight;
                }
                // First touch this wave: queue the neighbor for finalization.
                if(vtx_mask[connected_idx] == 0) {
                    pending_active_vtxs.push(connected_idx);
                }
                // Accumulate the total weight as a negative value (see the
                // mask encoding above).
                vtx_mask[connected_idx] -= dist_weight;
            }
        }
        // Normalize the vertices reached this wave and promote them to seeds
        // for the next wave.
        while(!pending_active_vtxs.empty()) {
            int vtx_idx = pending_active_vtxs.front();
            pending_active_vtxs.pop();
            for(int c = 0; c < data.texture_channel; ++c) {
                vtx_color[vtx_idx][c] /= -vtx_mask[vtx_idx];
            }
            vtx_mask[vtx_idx] = 1.0f;
            active_vtxs.push(vtx_idx);
        }
    }
}
// Bake the per-vertex colors back into copies of the input texture and mask.
// Texels referenced by a fully-colored vertex (mask == 1.0f) are overwritten
// with that vertex's color and marked 255 in the mask; everything else
// passes through unchanged.
pair<py::array_t<float>, py::array_t<uint8_t>> createOutputArrays(
    const MeshData& data, const vector<float>& vtx_mask,
    const vector<vector<float>>& vtx_color) {
    py::array_t<float> new_texture(data.texture_buf.size);
    py::array_t<uint8_t> new_mask(data.mask_buf.size);
    auto new_texture_buf = new_texture.request();
    auto new_mask_buf = new_mask.request();
    float* new_texture_ptr = static_cast<float*>(new_texture_buf.ptr);
    uint8_t* new_mask_ptr = static_cast<uint8_t*>(new_mask_buf.ptr);

    // Start from the original texture and mask so untouched texels survive.
    copy(data.texture_ptr, data.texture_ptr + data.texture_buf.size, new_texture_ptr);
    copy(data.mask_ptr, data.mask_ptr + data.mask_buf.size, new_mask_ptr);

    for(int face_idx = 0; face_idx < data.uv_idx_buf.shape[0]; ++face_idx) {
        for(int k = 0; k < 3; ++k) {
            int vtx_uv_idx = data.uv_idx_ptr[face_idx * 3 + k];
            int vtx_idx = data.pos_idx_ptr[face_idx * 3 + k];
            if(vtx_mask[vtx_idx] == 1.0f) {
                auto uv_coords = calculateUVCoordinates(vtx_uv_idx, data);
                for(int c = 0; c < data.texture_channel; ++c) {
                    new_texture_ptr[
                        (uv_coords.first * data.texture_width + uv_coords.second) *
                        data.texture_channel + c
                    ] = vtx_color[vtx_idx][c];
                }
                new_mask_ptr[uv_coords.first * data.texture_width + uv_coords.second] = 255;
            }
        }
    }

    // BUGFIX: reshape with the actual channel count instead of a hard-coded
    // 3 — the array was allocated with texture_buf.size elements, so a
    // non-3-channel texture would make the old resize({H, W, 3}) mismatch
    // the element count.
    new_texture.resize({data.texture_height, data.texture_width, data.texture_channel});
    new_mask.resize({data.texture_height, data.texture_width});
    return make_pair(new_texture, new_mask);
}
// Pack the per-vertex colors and masks into numpy arrays for return to
// Python: colors as (vtx_num, channels) float32, masks as (vtx_num,) uint8.
pair<py::array_t<float>, py::array_t<uint8_t>> createVertexColorOutput(
    const MeshData& data, const vector<int>& vtx_mask,
    const vector<vector<float>>& vtx_color) {
    py::array_t<float> out_color({data.vtx_num, data.texture_channel});
    py::array_t<uint8_t> out_mask({data.vtx_num});
    auto out_color_buf = out_color.request();
    auto out_mask_buf = out_mask.request();
    float* out_color_ptr = static_cast<float*>(out_color_buf.ptr);
    uint8_t* out_mask_ptr = static_cast<uint8_t*>(out_mask_buf.ptr);
    for(int v = 0; v < data.vtx_num; ++v) {
        out_mask_ptr[v] = vtx_mask[v];
        const vector<float>& src = vtx_color[v];
        for(int c = 0; c < data.texture_channel; ++c) {
            out_color_ptr[v * data.texture_channel + c] = src[c];
        }
    }
    return make_pair(out_color, out_mask);
}
} // anonymous namespace
// Inpaint uncolored texels by iteratively diffusing vertex colors from
// colored neighbors (float-mask variant), then bake them into the texture.
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeInpaint_smooth(
    py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
    py::array_t<int> pos_idx, py::array_t<int> uv_idx) {
    MeshData data(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);

    vector<float> vtx_mask;
    vector<vector<float>> vtx_color;
    vector<int> uncolored_vtxs;
    initializeVertexDataGeneric(data, vtx_mask, vtx_color, &uncolored_vtxs, 1.0f);

    vector<vector<int>> G;
    buildGraph(G, data);

    // Diffuse until the set of uncolored vertices stops shrinking.
    performSmoothingAlgorithm<float>(
        data, G, vtx_mask, vtx_color, uncolored_vtxs,
        [](float m) { return m > 0; },   // is the vertex colored?
        [](float& m) { m = 1.0f; });     // mark it as colored
    return createOutputArrays(data, vtx_mask, vtx_color);
}
// Inpaint by flood-fill propagation: colors spread outward wave by wave
// from the initially colored vertices, then get baked into the texture.
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeInpaint_forward(
    py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
    py::array_t<int> pos_idx, py::array_t<int> uv_idx) {
    MeshData data(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);

    vector<float> vtx_mask;
    vector<vector<float>> vtx_color;
    // uncolored_vtxs is not needed for the forward variant.
    initializeVertexDataGeneric(data, vtx_mask, vtx_color, nullptr, 1.0f);

    vector<vector<int>> G;
    buildGraph(G, data);

    // Seed the propagation queue with every vertex that already has a color.
    queue<int> active_vtxs;
    for(int v = 0; v < data.vtx_num; ++v) {
        if(vtx_mask[v] == 1.0f) {
            active_vtxs.push(v);
        }
    }

    performForwardPropagation(data, G, vtx_mask, vtx_color, active_vtxs);
    return createOutputArrays(data, vtx_mask, vtx_color);
}
// Public entry point: texture inpainting via vertex colors, dispatching on
// the requested algorithm ("smooth" diffusion or "forward" flood fill).
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeInpaint(
    py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
    py::array_t<int> pos_idx, py::array_t<int> uv_idx, const string& method = "smooth") {
    if(method == "forward") {
        return meshVerticeInpaint_forward(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
    }
    if(method == "smooth") {
        return meshVerticeInpaint_smooth(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
    }
    throw invalid_argument("Invalid method. Use 'smooth' or 'forward'.");
}
//============================
// Smooth vertex-color variant: runs the same diffusion as
// meshVerticeInpaint_smooth but returns per-vertex colors and masks
// instead of a baked texture.
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeColor_smooth(
    py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
    py::array_t<int> pos_idx, py::array_t<int> uv_idx) {
    MeshData data(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);

    vector<int> vtx_mask;
    vector<vector<float>> vtx_color;
    vector<int> uncolored_vtxs;
    initializeVertexDataGeneric(data, vtx_mask, vtx_color, &uncolored_vtxs, 1);

    vector<vector<int>> G;
    buildGraph(G, data);

    performSmoothingAlgorithm<int>(
        data, G, vtx_mask, vtx_color, uncolored_vtxs,
        [](int m) { return m > 0; },   // colored when the mask is positive
        [](int& m) { m = 2; });        // 2 marks inpainted (seeds stay 1)
    return createVertexColorOutput(data, vtx_mask, vtx_color);
}
// Public entry point for per-vertex inpainting. Only the "smooth" algorithm
// is implemented for vertex-color output (there is no forward variant).
pair<py::array_t<float>, py::array_t<uint8_t>> meshVerticeColor(
    py::array_t<float> texture, py::array_t<uint8_t> mask, py::array_t<float> vtx_pos, py::array_t<float> vtx_uv,
    py::array_t<int> pos_idx, py::array_t<int> uv_idx, const string& method = "smooth") {
    if(method == "smooth") {
        return meshVerticeColor_smooth(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
    } else {
        // BUGFIX: the old message claimed 'forward' was also accepted, but
        // this entry point only dispatches to the smooth implementation.
        throw invalid_argument("Invalid method. Use 'smooth'.");
    }
}
// Python bindings: expose the two inpainting entry points. Both default to
// method="smooth"; all array arguments are passed by keyword-capable args.
PYBIND11_MODULE(mesh_inpaint_processor, m) {
    m.def("meshVerticeInpaint", &meshVerticeInpaint, "A function to process mesh",
          py::arg("texture"), py::arg("mask"), py::arg("vtx_pos"), py::arg("vtx_uv"),
          py::arg("pos_idx"), py::arg("uv_idx"), py::arg("method") = "smooth");
    m.def("meshVerticeColor", &meshVerticeColor, "A function to process mesh",
          py::arg("texture"), py::arg("mask"), py::arg("vtx_pos"), py::arg("vtx_uv"),
          py::arg("pos_idx"), py::arg("uv_idx"), py::arg("method") = "smooth");
}

View File

@@ -0,0 +1,284 @@
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.
# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
import os
import cv2
import bpy
import math
import numpy as np
from io import StringIO
from typing import Optional, Tuple, Dict, Any
def _safe_extract_attribute(obj: Any, attr_path: str, default: Any = None) -> Any:
"""Extract nested attribute safely from object."""
try:
for attr in attr_path.split("."):
obj = getattr(obj, attr)
return obj
except AttributeError:
return default
def _convert_to_numpy(data: Any, dtype: np.dtype) -> Optional[np.ndarray]:
"""Convert data to numpy array with specified dtype, handling None values."""
if data is None:
return None
return np.asarray(data, dtype=dtype)
def load_mesh(mesh):
    """Extract vertices, faces, UVs and texture from a trimesh-style object.

    Returns (vtx_pos, pos_idx, vtx_uv, uv_idx, texture_data); each entry is
    None when the corresponding attribute is absent. UV indices reuse the
    face indices, and no texture is loaded here (texture_data is None).
    """
    def fetch(target, path):
        # Walk a dotted attribute path, yielding None when any hop is missing.
        try:
            for part in path.split("."):
                target = getattr(target, part)
            return target
        except AttributeError:
            return None

    def as_array(values, dtype):
        return None if values is None else np.asarray(values, dtype=dtype)

    raw_faces = fetch(mesh, "faces")
    vtx_pos = as_array(fetch(mesh, "vertices"), np.float32)
    pos_idx = as_array(raw_faces, np.int32)
    vtx_uv = as_array(fetch(mesh, "visual.uv"), np.float32)
    # UV topology mirrors the face topology (one UV per vertex assumed).
    uv_idx = as_array(raw_faces, np.int32)
    texture_data = None
    return vtx_pos, pos_idx, vtx_uv, uv_idx, texture_data
def _get_base_path_and_name(mesh_path: str) -> Tuple[str, str]:
"""Get base path without extension and mesh name."""
base_path = os.path.splitext(mesh_path)[0]
name = os.path.basename(base_path)
return base_path, name
def _save_texture_map(
    texture: np.ndarray,
    base_path: str,
    suffix: str = "",
    image_format: str = ".jpg",
    color_convert: Optional[int] = None,
) -> str:
    """Write a [0, 1] float texture to disk and return the saved file's basename.

    When *color_convert* is given it is an OpenCV cvtColor code applied before
    saving; otherwise the channels are flipped RGB->BGR for cv2.imwrite.
    """
    out_path = f"{base_path}{suffix}{image_format}"
    img_u8 = (texture * 255).astype(np.uint8)
    if color_convert is None:
        # cv2.imwrite expects BGR channel order.
        cv2.imwrite(out_path, img_u8[..., ::-1])
    else:
        cv2.imwrite(out_path, cv2.cvtColor(img_u8, color_convert))
    return os.path.basename(out_path)
def _write_mtl_properties(f, properties: Dict[str, Any]):
"""Write material properties to MTL file."""
for key, value in properties.items():
if isinstance(value, (list, tuple)):
f.write(f"{key} {' '.join(map(str, value))}\n")
else:
f.write(f"{key} {value}\n")
def _create_obj_content(
vtx_pos: np.ndarray, vtx_uv: np.ndarray, pos_idx: np.ndarray, uv_idx: np.ndarray, name: str
) -> str:
"""Create OBJ file content."""
buffer = StringIO()
# Write header and vertices
buffer.write(f"mtllib {name}.mtl\no {name}\n")
np.savetxt(buffer, vtx_pos, fmt="v %.6f %.6f %.6f")
np.savetxt(buffer, vtx_uv, fmt="vt %.6f %.6f")
buffer.write("s 0\nusemtl Material\n")
# Write faces
pos_idx_plus1 = pos_idx + 1
uv_idx_plus1 = uv_idx + 1
face_format = np.frompyfunc(lambda *x: f"{int(x[0])}/{int(x[1])}", 2, 1)
faces = face_format(pos_idx_plus1, uv_idx_plus1)
face_strings = [f"f {' '.join(face)}" for face in faces]
buffer.write("\n".join(face_strings) + "\n")
return buffer.getvalue()
def save_obj_mesh(mesh_path, vtx_pos, pos_idx, vtx_uv, uv_idx, texture, metallic=None, roughness=None, normal=None):
    """Write an OBJ mesh plus its texture maps and companion MTL file."""
    # Normalize all geometry inputs to typed numpy arrays.
    vtx_pos = _convert_to_numpy(vtx_pos, np.float32)
    vtx_uv = _convert_to_numpy(vtx_uv, np.float32)
    pos_idx = _convert_to_numpy(pos_idx, np.int32)
    uv_idx = _convert_to_numpy(uv_idx, np.int32)

    base_path, name = _get_base_path_and_name(mesh_path)
    with open(mesh_path, "w") as obj_file:
        obj_file.write(_create_obj_content(vtx_pos, vtx_uv, pos_idx, uv_idx, name))

    # The diffuse map is always written; PBR maps only when provided.
    texture_maps = {"diffuse": _save_texture_map(texture, base_path)}
    if metallic is not None:
        texture_maps["metallic"] = _save_texture_map(metallic, base_path, "_metallic", color_convert=cv2.COLOR_RGB2GRAY)
    if roughness is not None:
        texture_maps["roughness"] = _save_texture_map(
            roughness, base_path, "_roughness", color_convert=cv2.COLOR_RGB2GRAY
        )
    if normal is not None:
        texture_maps["normal"] = _save_texture_map(normal, base_path, "_normal")

    # A metallic map switches the material to the PBR property set.
    _create_mtl_file(base_path, texture_maps, metallic is not None)
def _create_mtl_file(base_path: str, texture_maps: Dict[str, str], is_pbr: bool):
    """Write the MTL material file referencing the saved texture maps."""

    def emit(f, props: Dict[str, Any]):
        # Sequences become space-joined triples; scalars are written verbatim.
        for key, value in props.items():
            if isinstance(value, (list, tuple)):
                f.write(f"{key} {' '.join(map(str, value))}\n")
            else:
                f.write(f"{key} {value}\n")

    with open(f"{base_path}.mtl", "w") as f:
        f.write("newmtl Material\n")
        if is_pbr:
            emit(f, {
                "Kd": [0.800, 0.800, 0.800],
                "Ke": [0.000, 0.000, 0.000],  # emissive color
                "Ni": 1.500,  # index of refraction
                "d": 1.0,  # opacity
                "illum": 2,  # illumination model
                "map_Kd": texture_maps["diffuse"],
            })
            # Extra PBR maps are referenced only if they were actually saved.
            pbr_map_keys = [("metallic", "map_Pm"), ("roughness", "map_Pr"), ("normal", "map_Bump -bm 1.0")]
            for texture_key, mtl_key in pbr_map_keys:
                if texture_key in texture_maps:
                    f.write(f"{mtl_key} {texture_maps[texture_key]}\n")
        else:
            emit(f, {
                "Ns": 250.000000,
                "Ka": [0.200, 0.200, 0.200],
                "Kd": [0.800, 0.800, 0.800],
                "Ks": [0.500, 0.500, 0.500],
                "Ke": [0.000, 0.000, 0.000],
                "Ni": 1.500,
                "d": 1.0,
                "illum": 3,
                "map_Kd": texture_maps["diffuse"],
            })
def save_mesh(mesh_path, vtx_pos, pos_idx, vtx_uv, uv_idx, texture, metallic=None, roughness=None, normal=None):
    """Persist the mesh; OBJ is currently the only supported output format."""
    save_obj_mesh(
        mesh_path,
        vtx_pos,
        pos_idx,
        vtx_uv,
        uv_idx,
        texture,
        metallic=metallic,
        roughness=roughness,
        normal=normal,
    )
def _setup_blender_scene():
    """Ensure a dedicated "convert" scene exists and make it the active scene."""
    # A separate scene keeps the conversion isolated from any existing data.
    if "convert" not in bpy.data.scenes:
        bpy.data.scenes.new("convert")
    bpy.context.window.scene = bpy.data.scenes["convert"]
def _clear_scene_objects():
    """Remove every object from the current Blender scene."""
    # NOTE(review): select_set(True) looks redundant here — objects.remove
    # does not require selection. Also removes while iterating
    # scene.objects; iterating a snapshot (list(...)) would be safer — confirm.
    for obj in bpy.context.scene.objects:
        obj.select_set(True)
        bpy.data.objects.remove(obj, do_unlink=True)
def _select_mesh_objects():
    """Select every MESH-type object in the scene, deselecting everything else."""
    bpy.ops.object.select_all(action="DESELECT")
    for scene_obj in bpy.context.scene.objects:
        if scene_obj.type == "MESH":
            scene_obj.select_set(True)
def _merge_vertices_if_needed(merge_vertices: bool):
    """Run remove-doubles on every selected mesh when *merge_vertices* is set."""
    if not merge_vertices:
        return
    for candidate in bpy.context.selected_objects:
        if candidate.type != "MESH":
            continue
        bpy.context.view_layer.objects.active = candidate
        # Mesh-level operators require edit mode; restore object mode after.
        bpy.ops.object.mode_set(mode="EDIT")
        bpy.ops.mesh.select_all(action="SELECT")
        bpy.ops.mesh.remove_doubles()
        bpy.ops.object.mode_set(mode="OBJECT")
def _apply_shading(shade_type: str, auto_smooth_angle: float):
    """Apply the requested shading mode; unknown modes are silently ignored."""
    if shade_type == "SMOOTH":
        bpy.ops.object.shade_smooth()
    elif shade_type == "FLAT":
        bpy.ops.object.shade_flat()
    elif shade_type == "AUTO_SMOOTH":
        _apply_auto_smooth(auto_smooth_angle)
def _apply_auto_smooth(auto_smooth_angle: float):
    """Apply auto-smooth shading, dispatching on the running Blender version.

    The auto-smooth operator API changed twice: before 4.1 it is a flag on
    shade_smooth, 4.1 introduces shade_smooth_by_angle, and 4.2+ uses
    shade_auto_smooth.
    """
    angle_rad = math.radians(auto_smooth_angle)
    if bpy.app.version < (4, 1, 0):
        bpy.ops.object.shade_smooth(use_auto_smooth=True, auto_smooth_angle=angle_rad)
    elif bpy.app.version < (4, 2, 0):
        bpy.ops.object.shade_smooth_by_angle(angle=angle_rad)
    else:
        bpy.ops.object.shade_auto_smooth(angle=angle_rad)
def convert_obj_to_glb(
    obj_path: str,
    glb_path: str,
    shade_type: str = "SMOOTH",
    auto_smooth_angle: float = 60,
    merge_vertices: bool = False,
) -> bool:
    """Convert an OBJ file to GLB using Blender.

    Args:
        obj_path: source .obj file path.
        glb_path: destination .glb file path.
        shade_type: "SMOOTH", "FLAT" or "AUTO_SMOOTH".
        auto_smooth_angle: angle in degrees used for AUTO_SMOOTH shading.
        merge_vertices: merge duplicate vertices after import.

    Returns:
        True on success, False on any failure (the error is printed).
    """
    try:
        _setup_blender_scene()
        _clear_scene_objects()
        bpy.ops.wm.obj_import(filepath=obj_path)
        _select_mesh_objects()
        _merge_vertices_if_needed(merge_vertices)
        _apply_shading(shade_type, auto_smooth_angle)
        bpy.ops.export_scene.gltf(filepath=glb_path, use_active_scene=True)
        return True
    except Exception:
        # BUGFIX: the old bare `return False` swallowed every error silently,
        # making failures undiagnosable. Keep the bool contract but surface
        # the traceback for the caller's logs.
        import traceback

        traceback.print_exc()
        return False