# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unreal
def rename_assets(search_pattern, replace_pattern, case_sensitive):
"""
    Renames the selected assets whose names contain search_pattern, replacing that pattern
    with replace_pattern; matching is case-sensitive or not according to the case_sensitive parameter.
Parameters:
:param search_pattern: the pattern to match an asset as a candidate for renaming.
:param replace_pattern: the text to use to replace search pattern if matched.
    :param case_sensitive: True if you want the matching to be case-sensitive, False otherwise.
Returns:
:return renamed_asset_count: The number of assets renamed.
"""
# Get the libraries
system_lib = unreal.SystemLibrary()
editor_util = unreal.EditorUtilityLibrary()
string_lib = unreal.StringLibrary()
selected_assets = editor_util.get_selected_assets()
asset_count = len(selected_assets)
renamed_asset_count: int = 0
unreal.log("Selected {} assets".format(asset_count))
for asset in selected_assets:
# Get the name of the assets
asset_name = system_lib.get_object_name(asset)
# Check if the asset name contains the to-be-replaced text
if string_lib.contains(asset_name, search_pattern, use_case=case_sensitive):
search_case = (
unreal.SearchCase.CASE_SENSITIVE
if case_sensitive
else unreal.SearchCase.IGNORE_CASE
)
new_asset_name = string_lib.replace(
asset_name, search_pattern, replace_pattern, search_case=search_case
)
editor_util.rename_asset(asset, new_asset_name)
renamed_asset_count += 1
unreal.log("Renamed {} to {}.".format(asset_name, new_asset_name))
else:
unreal.log(
"{} did not match the search pattern, skipping rename operation.".format(
asset_name
)
)
unreal.log("Rename {} of {} assets.".format(renamed_asset_count, asset_count))
return renamed_asset_count
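# Example invocation (an illustrative sketch; the pattern values below are placeholders,
# not part of the original tool). Select a few assets in the Content Browser, then run:
# rename_assets(search_pattern="Old", replace_pattern="New", case_sensitive=True)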
|
import unreal
# def listAssetPaths():
# # print ('reloaded')
# EAL = unreal.EditorAssetLibrary
# assetPaths = EAL.list_assets('/Game')
# for i in assetPaths:
# print (i)
# def getSelectionContentBrowser():
# EUL = unreal.EditorUtilityLibrary
# selectedAssets = EUL.get_selected_assets()
# for i in selectedAssets:
# print(i)
# LABEL = {
#     "Car": 1,
#     "Pedestrian": 2,
#     "TrafficLight": 3,
#     "Building": 4,
# }
# all_actors = unreal.EditorLevelLibrary.get_all_level_actors()
# for actor in all_actors:
#     actor_name = actor.get_name()
#     for label, stencil_value in LABEL.items():
#         if label.lower() in actor_name.lower():
VEHICLE_STENCIL_VALUE = 1
# Get all actors in the level
all_actors = unreal.EditorLevelLibrary.get_all_level_actors()
print("\n🎬 Checking for 'vehCar' meshes:")
for actor in all_actors:
# Get mesh components (Static or Skeletal)
mesh_components = actor.get_components_by_class(unreal.StaticMeshComponent)
if not mesh_components:
mesh_components = actor.get_components_by_class(unreal.SkeletalMeshComponent)
if mesh_components:
for mesh_component in mesh_components:
            if isinstance(mesh_component, unreal.StaticMeshComponent):
                mesh = mesh_component.get_editor_property("static_mesh")
            else:
                mesh = mesh_component.get_editor_property("skeletal_mesh")
if mesh:
mesh_name = mesh.get_name()
# Check if the mesh name contains "vehCar"
if "vehCar" in mesh_name:
print(f"🚗 Found Vehicle: {mesh_name} (Applying Stencil {VEHICLE_STENCIL_VALUE})")
# Enable custom depth and assign stencil value on the Mesh Component
mesh_component.set_editor_property("render_custom_depth", True)
mesh_component.set_editor_property("custom_depth_stencil_value", VEHICLE_STENCIL_VALUE)
print("\n✅ Stencil applied to 'vehCar' meshes!")
|
# develop branch
import unreal
import csv
import time
import os
import pandas as pd
from multiprocessing import Pool, Process
import threading
from concurrent.futures import ThreadPoolExecutor
import concurrent.futures
# TODO: can be kept as a snippet for easy reuse
def saveToAsset(actor , GW , AO):
try:
setting = unreal.MeshMergingSettings()
setting.pivot_point_at_zero = True
setting.merge_materials = False
options = unreal.MergeStaticMeshActorsOptions(
destroy_source_actors = False,
spawn_merged_actor = False,
mesh_merging_settings = setting
)
temp_dir = os.path.join('/project/', str(GW), str(AO))
actor_name = actor.get_name()
actor_label = actor.get_actor_label()
asset_path = os.path.join(temp_dir, actor_label).replace('\\','/')
options.base_package_name = asset_path # The package path you want to save to
        # TODO: merge the selected actors into a static mesh
if unreal.EditorAssetLibrary().does_asset_exist(asset_path):
unreal.log("当前资产 %s 已存在" % actor_label)
else:
            merge_actor = static_mesh_lib.merge_static_mesh_actors([actor], options)  # export individually
print("EXPORT SUCCESS : %s is save to %s" % (actor.get_actor_label(), asset_path.replace('\\','/')))
except Exception as e:
unreal.log_error(f"Error in saveToAsset: {e}")
# Recursively walks down to collect children; relatively expensive
def get_all_attached_actors(actor, collected_actors=None):
if collected_actors is None:
collected_actors = []
# Get attached child actors
attached_actors = actor.get_attached_actors()
# Add attached actors to the list
for attached_actor in attached_actors:
collected_actors.append(attached_actor)
# Recursively get actors attached to this actor
get_all_attached_actors(attached_actor, collected_actors)
return collected_actors
def save_to_asset_on_main_thread(item, gw, ao):
    # Call the saveToAsset function on the main thread
print("11111111")
unreal.execute_on_main_thread(saveToAsset, item, gw, ao)
print("2222222222")
def threaded_task(item, gw, ao):
    # Non-engine computation or preparation work can be done here
save_to_asset_on_main_thread(item, gw, ao)
level_lib = unreal.get_editor_subsystem(unreal.EditorActorSubsystem)
asset_lib = unreal.get_editor_subsystem(unreal.EditorAssetSubsystem)
static_mesh_lib = unreal.get_editor_subsystem(unreal.StaticMeshEditorSubsystem)
selected_actors = level_lib.get_selected_level_actors()
if not selected_actors:
unreal.log_error("No actors selected.")
exit()
all_static_mesh_actors = []
for item in selected_actors:
label = item.get_name().split('_')
if label and label[0] == "StaticMeshActor":
all_static_mesh_actors.append(item)
# for item in all_static_mesh_actors:
# print(item.get_actor_label().split('_')[0])
# Check whether the naming matches
df = pd.read_csv('/project/.csv')
df_target_col = df.loc[:,['工位','零组件号',"下级工艺件"]]
timeStart = time.time()
# Original logic: since revised
# for item in all_static_mesh_actors:
# if item.static_mesh_component.static_mesh:
# label = item.get_actor_label().split('_')
# if label:
# df_copy = df_target_col[df['零组件号'] == label[0]].copy()
# # print(df_copy)
# if df_copy.size:
# for index, row in df_copy.iterrows():
# saveToAsset(item, row['工位'], row['下级工艺件'])
# else:
# unreal.log_error(f"{item.get_actor_label()} has no data in df_copy.")
# else:
# unreal.log_error(f"{item.get_actor_label()} has no static mesh component.")
# with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
# for item in all_static_mesh_actors:
# if item.static_mesh_component.static_mesh:
# label = item.get_actor_label().split('_')
# if label:
# df_copy = df_target_col[df['零组件号'] == label[0]].copy()
# # print(df_copy)
# if df_copy.size:
# for index, row in df_copy.iterrows():
# Submit the task to the thread pool
# executor.submit(threaded_task, item, row['工位'], row['下级工艺件'])
# else:
# unreal.log_error(f"{item.get_actor_label()} has no data in df_copy.")
# else:
# unreal.log_error(f"{item.get_actor_label()} has no static mesh component.")
# Batch processing: revised logic
batch_size = 1000  # number of actors processed per batch
num_batches = (len(all_static_mesh_actors) + batch_size - 1) // batch_size  # ceiling division
for i in range(num_batches):
batch = all_static_mesh_actors[i*batch_size:(i+1)*batch_size]
for item in batch:
if item.static_mesh_component.static_mesh:
df_copy = df_target_col[df['零组件号'] == item.get_actor_label().split('_')[0]].copy()
if df_copy.size:
for index, row in df_copy.iterrows():
saveToAsset(item, row['工位'], row['下级工艺件'])
else:
unreal.log(f"当前csv数据中未找到:{item.get_actor_label()}")
else:
unreal.log_error(f"丢失static mesh component:{item.get_actor_label()}")
    # After each batch, run garbage collection to free memory
unreal.SystemLibrary.collect_garbage()
unreal.log_warning("Elapsed time: {:.2f}s".format(time.time() - timeStart))
# Asset saving logic
unreal.get_editor_subsystem(unreal.EditorAssetSubsystem).save_directory('/Game/',only_if_is_dirty=True,recursive=True)
# unreal.get_editor_subsystem(unreal.EditorAssetSubsystem).save_directory('/project/',only_if_is_dirty=True,recursive=True)
unreal.log("保存执行完毕!")
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
import os
import math
import unreal
from enum import IntEnum
from typing import Optional, Any
from dataclasses import dataclass, field, asdict
from openjd.model import parse_model
from openjd.model.v2023_09 import (
StepScript,
StepTemplate,
TaskParameterType,
HostRequirementsTemplate,
TaskParameterList,
StepParameterSpaceDefinition,
)
from openjd.model.v2023_09._model import StepDependency
from deadline.unreal_submitter import common
from deadline.unreal_submitter.unreal_open_job.unreal_open_job_entity import (
UnrealOpenJobEntity,
OpenJobStepParameterNames,
PARAMETER_DEFINITION_MAPPING,
ParameterDefinitionDescriptor,
)
from deadline.unreal_submitter.unreal_open_job.unreal_open_job_environment import (
UnrealOpenJobEnvironment,
)
from deadline.unreal_submitter.unreal_open_job.unreal_open_job_parameters_consistency import (
ParametersConsistencyChecker,
)
from deadline.unreal_logger import get_logger
from deadline.unreal_submitter import exceptions, settings
logger = get_logger()
@dataclass
class UnrealOpenJobStepParameterDefinition:
"""
Dataclass for storing and managing OpenJob Step Task parameter definitions
:cvar name: Name of the parameter
:cvar type: OpenJD Type of the parameter (INT, FLOAT, STRING, PATH)
:cvar range: List of parameter values
"""
name: str
type: str
range: list[Any] = field(default_factory=list)
@classmethod
def from_unreal_param_definition(cls, u_param: unreal.StepTaskParameterDefinition):
"""
Create UnrealOpenJobStepParameterDefinition instance from unreal.StepTaskParameterDefinition
object.
:return: UnrealOpenJobStepParameterDefinition instance
:rtype: UnrealOpenJobStepParameterDefinition
"""
python_class = PARAMETER_DEFINITION_MAPPING[u_param.type.name].python_class
build_kwargs = dict(
name=u_param.name,
type=u_param.type.name,
range=[python_class(p) for p in list(u_param.range)],
)
return cls(**build_kwargs)
@classmethod
def from_dict(cls, param_dict: dict):
"""
        Create a UnrealOpenJobStepParameterDefinition instance from a python dict
:return: UnrealOpenJobStepParameterDefinition instance
:rtype: UnrealOpenJobStepParameterDefinition
"""
return cls(**param_dict)
def to_dict(self):
"""
Return UnrealOpenJobStepParameterDefinition as dictionary
:return: UnrealOpenJobStepParameterDefinition as python dictionary
:rtype: dict[str, Any]
"""
return asdict(self)
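# Illustrative round-trip (a sketch; the parameter name and values are hypothetical):
#   param = UnrealOpenJobStepParameterDefinition.from_dict(
#       {"name": "ChunkSize", "type": "INT", "range": [1, 2, 3]}
#   )
#   assert param.to_dict() == {"name": "ChunkSize", "type": "INT", "range": [1, 2, 3]}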
# Base Step implementation
class UnrealOpenJobStep(UnrealOpenJobEntity):
"""
Unreal Open Job Step entity
"""
def __init__(
self,
file_path: Optional[str] = None,
name: Optional[str] = None,
step_dependencies: Optional[list[str]] = None,
environments: Optional[list[UnrealOpenJobEnvironment]] = None,
extra_parameters: Optional[list[UnrealOpenJobStepParameterDefinition]] = None,
host_requirements: Optional[HostRequirementsTemplate] = None,
):
"""
:param file_path: The file path of the step descriptor
:type file_path: str
:param name: The name of the step
:type name: str
:param step_dependencies: The list of step dependencies
:type step_dependencies: list[str]
:param environments: The list of environments
:type environments: list[UnrealOpenJobEnvironment]
:param extra_parameters: Extra Step parameters that should be used during the Job execution
:type extra_parameters: list[UnrealOpenJobStepParameterDefinition]
:param host_requirements: HostRequirements to apply to step during the Job execution
:type host_requirements: HostRequirements
"""
self._step_dependencies = step_dependencies or []
self._environments = environments or []
self._extra_parameters = extra_parameters or []
self._host_requirements = host_requirements
super().__init__(StepTemplate, file_path, name)
self._create_missing_extra_parameters_from_template()
self._open_job = None
@property
def host_requirements(self):
return self._host_requirements
@host_requirements.setter
def host_requirements(self, value: HostRequirementsTemplate):
self._host_requirements = value
@property
def step_dependencies(self) -> list[str]:
return self._step_dependencies
@step_dependencies.setter
def step_dependencies(self, value: list[str]):
self._step_dependencies = value
@property
def environments(self) -> list[UnrealOpenJobEnvironment]:
return self._environments
@environments.setter
def environments(self, value: list[UnrealOpenJobEnvironment]):
self._environments = value
@property
def open_job(self):
return self._open_job
@open_job.setter
def open_job(self, value):
self._open_job = value
@classmethod
def from_data_asset(cls, data_asset: unreal.DeadlineCloudStep) -> "UnrealOpenJobStep":
"""
Create the instance of UnrealOpenJobStep from unreal.DeadlineCloudStep.
        Calls the same method on the data_asset's environments.
:param data_asset: unreal.DeadlineCloudStep instance
:return: UnrealOpenJobStep instance
:rtype: UnrealOpenJobStep
"""
return cls(
file_path=data_asset.path_to_template.file_path,
name=data_asset.name,
step_dependencies=list(data_asset.depends_on),
environments=[
UnrealOpenJobEnvironment.from_data_asset(env) for env in data_asset.environments
],
extra_parameters=[
UnrealOpenJobStepParameterDefinition.from_unreal_param_definition(param)
for param in data_asset.get_step_parameters()
],
)
def _find_extra_parameter(
self, parameter_name: str, parameter_type: str
) -> Optional[UnrealOpenJobStepParameterDefinition]:
"""
Find extra parameter by given name and type
:param parameter_name: Parameter name
:param parameter_type: Parameter type (INT, FLOAT, STRING, PATH)
:return: Parameter if found, None otherwise
:rtype: Optional[UnrealOpenJobStepParameterDefinition]
"""
return next(
(
p
for p in self._extra_parameters
if p.name == parameter_name and p.type == parameter_type
),
None,
)
def _create_missing_extra_parameters_from_template(self):
"""
Update parameters with YAML template data. Mostly needed for custom job submission process.
        If no template file is found, skip updating and log a warning.
This is not an error and should not break the building process.
"""
try:
extra_param_names = [p.name for p in self._extra_parameters]
for p in self.get_template_object()["parameterSpace"]["taskParameterDefinitions"]:
if p["name"] not in extra_param_names:
self._extra_parameters.append(UnrealOpenJobStepParameterDefinition.from_dict(p))
except FileNotFoundError:
logger.warning("No template file found to read parameters from.")
def _update_extra_parameter(
self, extra_parameter: UnrealOpenJobStepParameterDefinition
) -> bool:
"""
        Update a parameter by replacing the existing parameter with the given one,
        matching them by name and type.
:param extra_parameter: UnrealOpenJobStepParameterDefinition instance
:type: UnrealOpenJobStepParameterDefinition
:return: True if updated, False otherwise
:rtype: bool
"""
existed_parameter = self._find_extra_parameter(extra_parameter.name, extra_parameter.type)
if existed_parameter:
self._extra_parameters.remove(existed_parameter)
self._extra_parameters.append(extra_parameter)
return True
return False
def _check_parameters_consistency(self):
"""
Check Step parameters consistency
:return: Result of parameters consistency check
:rtype: ParametersConsistencyCheckResult
"""
result = ParametersConsistencyChecker.check_step_parameters_consistency(
step_template_path=self.file_path,
step_parameters=[p.to_dict() for p in self._extra_parameters],
)
result.reason = f'OpenJob Step "{self.name}": ' + result.reason
return result
def _build_step_parameter_definition_list(self) -> list:
"""
Build the job parameter definition list from the step template object.
:return: List of Step parameter definitions
:rtype: list
"""
step_template_object = self.get_template_object()
step_parameter_definition_list = TaskParameterList()
yaml_params = step_template_object["parameterSpace"]["taskParameterDefinitions"]
for yaml_p in yaml_params:
override_param = next(
(p for p in self._extra_parameters if p.name == yaml_p["name"]), None
)
if override_param:
yaml_p["range"] = override_param.range
param_descriptor: ParameterDefinitionDescriptor = PARAMETER_DEFINITION_MAPPING[
yaml_p["type"]
]
param_definition_cls = param_descriptor.task_parameter_openjd_class
step_parameter_definition_list.append(
parse_model(model=param_definition_cls, obj=yaml_p)
)
return step_parameter_definition_list
def _build_template(self) -> StepTemplate:
"""
Build StepTemplate OpenJD model.
Build process:
1. Fill Step parameter definition list
2. Fill Host Requirements if provided
3. Build given Environments
4. Set up Step dependencies
:return: StepTemplate instance
:rtype: StepTemplate
"""
step_template_object = self.get_template_object()
step_parameters = self._build_step_parameter_definition_list()
template_dict = {
"name": self.name,
"script": parse_model(model=StepScript, obj=step_template_object["script"]),
}
if step_parameters:
template_dict["parameterSpace"] = StepParameterSpaceDefinition(
taskParameterDefinitions=step_parameters,
combination=step_template_object["parameterSpace"].get("combination"),
)
if self._environments:
template_dict["stepEnvironments"] = [env.build_template() for env in self._environments]
if self._step_dependencies:
template_dict["dependencies"] = [
StepDependency(dependsOn=step_dependency)
for step_dependency in self._step_dependencies
]
if self.host_requirements:
template_dict["hostRequirements"] = self.host_requirements
return parse_model(model=self.template_class, obj=template_dict)
def get_asset_references(self):
"""
        Return this Step's AssetReferences unioned with the given Environments' AssetReferences
:return: AssetReferences from this Step and its Environments
:rtype: AssetReferences
"""
asset_references = super().get_asset_references()
for environment in self._environments:
asset_references = asset_references.union(environment.get_asset_references())
return asset_references
def update_extra_parameter(self, extra_parameter: UnrealOpenJobStepParameterDefinition):
"""
Public method for updating UnrealOpenJobStep's extra parameters.
See _update_extra_parameter()
:param extra_parameter: UnrealOpenJobStepParameterDefinition instance
:type: UnrealOpenJobStepParameterDefinition
:return: True if updated, False otherwise
:rtype: bool
"""
return self._update_extra_parameter(extra_parameter)
# Render Step
class RenderUnrealOpenJobStep(UnrealOpenJobStep):
"""
Unreal Open Job Render Step entity
"""
default_template_path = settings.RENDER_STEP_TEMPLATE_DEFAULT_PATH
class RenderArgsType(IntEnum):
"""
Type of the render arguments
:cvar NOT_SET: Default value
:cvar QUEUE_MANIFEST_PATH: Use manifest file with serialized MoviePipelineQueue
:cvar RENDER_DATA: Use Level LevelSequence and MRQJobConfiguration unreal assets
:cvar MRQ_ASSET: Use MoviePipelineQueue unreal asset
"""
NOT_SET = 0
QUEUE_MANIFEST_PATH = 1
RENDER_DATA = 2
MRQ_ASSET = 3
def __init__(
self,
file_path: Optional[str] = None,
name: Optional[str] = None,
step_dependencies: Optional[list[str]] = None,
environments: Optional[list] = None,
extra_parameters: Optional[list] = None,
host_requirements: Optional[HostRequirementsTemplate] = None,
mrq_job: Optional[unreal.MoviePipelineExecutorJob] = None,
):
"""
:param file_path: The file path of the step descriptor
:type file_path: str
:param name: The name of the step
:type name: str
:param step_dependencies: The list of step dependencies
:type step_dependencies: list[str]
:param environments: The list of environments
:type environments: list
:param extra_parameters: The list of extra parameters
:type extra_parameters: list
:param host_requirements: HostRequirements instance
:type host_requirements: HostRequirements
:param mrq_job: MRQ Job object
:type mrq_job: unreal.MoviePipelineExecutorJob
"""
super().__init__(
file_path, name, step_dependencies, environments, extra_parameters, host_requirements
)
self._mrq_job = mrq_job
self._queue_manifest_path: Optional[str] = None
self._render_args_type = self._get_render_arguments_type()
@property
def mrq_job(self):
return self._mrq_job
@mrq_job.setter
def mrq_job(self, value: unreal.MoviePipelineExecutorJob):
self._mrq_job = value
def _get_chunk_ids_count(self) -> int:
"""
        Get the number of shot chunks,
        i.e. the count of enabled shots in the level sequence divided by the chunk size parameter value.
        If the chunk size parameter value is 0 or less, a chunk size of 1 is used.
Example:
- LevelSequence shots: [sh1 - enabled, sh2 - disabled, sh3 - enabled, sh4 - enabled]
- ChunkSize: 2
- ChunkIds count: 2 (sh1, sh3; sh4)
:raises ValueError: When no chunk size parameter is set
:return: ChunkIds count
:rtype: int
"""
if not self.mrq_job:
raise exceptions.MrqJobIsMissingError("MRQ Job must be provided")
enabled_shots = [shot for shot in self.mrq_job.shot_info if shot.enabled]
if not self.open_job:
raise exceptions.OpenJobIsMissingError("Render Job must be provided")
chunk_size_parameter = self.open_job._find_extra_parameter(
parameter_name=OpenJobStepParameterNames.TASK_CHUNK_SIZE, parameter_type="INT"
)
if chunk_size_parameter is None:
raise ValueError(
f'Render Job\'s parameter "{OpenJobStepParameterNames.TASK_CHUNK_SIZE}" '
f"must be provided in extra parameters or template"
)
chunk_size = chunk_size_parameter.value
if chunk_size <= 0:
            chunk_size = 1  # by default 1 chunk consists of 1 shot
task_chunk_ids_count = math.ceil(len(enabled_shots) / chunk_size)
return task_chunk_ids_count
def _get_render_arguments_type(self) -> Optional["RenderUnrealOpenJobStep.RenderArgsType"]:
"""
Return the render arguments type depending on Step parameters
Priority of render argument type setting:
        - RenderArgsType.QUEUE_MANIFEST_PATH if the QueueManifestPath parameter exists
        - RenderArgsType.MRQ_ASSET if the MoviePipelineQueuePath parameter exists
        - RenderArgsType.RENDER_DATA if the LevelPath, LevelSequencePath and MrqJobConfigurationPath
          parameters exist
        - RenderArgsType.NOT_SET if there are no valid parameters
"""
parameter_names = [p.name for p in self._extra_parameters]
for p in parameter_names:
if p == OpenJobStepParameterNames.QUEUE_MANIFEST_PATH:
return RenderUnrealOpenJobStep.RenderArgsType.QUEUE_MANIFEST_PATH
if p == OpenJobStepParameterNames.MOVIE_PIPELINE_QUEUE_PATH:
return RenderUnrealOpenJobStep.RenderArgsType.MRQ_ASSET
if {
OpenJobStepParameterNames.LEVEL_SEQUENCE_PATH,
OpenJobStepParameterNames.LEVEL_PATH,
OpenJobStepParameterNames.MRQ_JOB_CONFIGURATION_PATH,
}.issubset(set(parameter_names)):
return RenderUnrealOpenJobStep.RenderArgsType.RENDER_DATA
return RenderUnrealOpenJobStep.RenderArgsType.NOT_SET
def _build_template(self) -> StepTemplate:
"""
Build StepTemplate OpenJD model.
Build process:
1. Forcibly update Step parameters listed in OpenJobStepParameterNames.
If QueueManifestPath parameter exists, set up QueueManifestPath parameter definition
and add it to Step Asset References.
2. Fill Step parameter definition list
3. Fill Host Requirements if provided
4. Build given Environments
5. Set up Step dependencies
:return: StepTemplate instance
:rtype: StepTemplate
"""
if self._render_args_type == RenderUnrealOpenJobStep.RenderArgsType.NOT_SET:
raise exceptions.RenderArgumentsTypeNotSetError(
"RenderOpenJobStep parameters are not valid. Expect one of the followin/project/"
f"- {OpenJobStepParameterNames.QUEUE_MANIFEST_PATH}\n"
f"- {OpenJobStepParameterNames.MOVIE_PIPELINE_QUEUE_PATH}\n"
f"- ({OpenJobStepParameterNames.LEVEL_SEQUENCE_PATH}, "
f"{OpenJobStepParameterNames.LEVEL_PATH}, "
f"{OpenJobStepParameterNames.MRQ_JOB_CONFIGURATION_PATH})\n"
)
task_chunk_id_param_definition = UnrealOpenJobStepParameterDefinition(
OpenJobStepParameterNames.TASK_CHUNK_ID,
TaskParameterType.INT.value,
[i for i in range(self._get_chunk_ids_count())],
)
self._update_extra_parameter(task_chunk_id_param_definition)
handler_param_definition = UnrealOpenJobStepParameterDefinition(
OpenJobStepParameterNames.ADAPTOR_HANDLER, TaskParameterType.STRING.value, ["render"]
)
self._update_extra_parameter(handler_param_definition)
if self.mrq_job:
output_setting = self.mrq_job.get_configuration().find_setting_by_class(
unreal.MoviePipelineOutputSetting
)
output_path = output_setting.output_directory.path
common.validate_path_does_not_contain_non_valid_chars(output_path)
path_context = common.get_path_context_from_mrq_job(self.mrq_job)
output_path = output_path.format_map(path_context).rstrip("/")
output_param_definition = UnrealOpenJobStepParameterDefinition(
OpenJobStepParameterNames.OUTPUT_PATH, TaskParameterType.PATH.value, [output_path]
)
self._update_extra_parameter(output_param_definition)
if self._render_args_type == RenderUnrealOpenJobStep.RenderArgsType.QUEUE_MANIFEST_PATH:
manifest_param_definition = UnrealOpenJobStepParameterDefinition(
OpenJobStepParameterNames.QUEUE_MANIFEST_PATH,
TaskParameterType.PATH.value,
[self._save_manifest_file()],
)
self._update_extra_parameter(manifest_param_definition)
step_entity = super()._build_template()
return step_entity
def _save_manifest_file(self) -> Optional[str]:
"""
        Create a new MoviePipelineQueue object, add the given MRQ Job to it and serialize it to a .utxt file
:return: Path to serialized manifest file
:rtype: str
"""
new_queue = unreal.MoviePipelineQueue()
new_job = new_queue.duplicate_job(self.mrq_job)
# In duplicated job remove empty auto-detected files since
# we don't want them to be saved in manifest
new_job.preset_overrides.job_attachments.input_files.auto_detected = (
unreal.DeadlineCloudFileAttachmentsArray()
)
new_job.preset_overrides.job_attachments.input_directories.auto_detected_directories = (
unreal.DeadlineCloudDirectoryAttachmentsArray()
)
_, manifest_path = unreal.MoviePipelineEditorLibrary.save_queue_to_manifest_file(new_queue)
serialized_manifest = unreal.MoviePipelineEditorLibrary.convert_manifest_file_to_string(
manifest_path
)
movie_render_pipeline_dir = os.path.join(
unreal.SystemLibrary.get_project_saved_directory(),
"UnrealDeadlineCloudService",
"RenderJobManifests",
)
os.makedirs(movie_render_pipeline_dir, exist_ok=True)
render_job_manifest_path = unreal.Paths.create_temp_filename(
movie_render_pipeline_dir, prefix="RenderJobManifest", extension=".utxt"
)
with open(render_job_manifest_path, "w") as manifest:
logger.info(f"Saving Manifest file `{render_job_manifest_path}`")
manifest.write(serialized_manifest)
self._queue_manifest_path = unreal.Paths.convert_relative_path_to_full(
render_job_manifest_path
)
return self._queue_manifest_path
def get_asset_references(self):
"""
        Return this Step's AssetReferences unioned with the given Environments' AssetReferences,
        adding the generated manifest file path if it exists
:return: AssetReferences from this Step and its Environments
:rtype: AssetReferences
"""
asset_references = super().get_asset_references()
if self._queue_manifest_path:
asset_references.input_filenames.add(self._queue_manifest_path)
return asset_references
# UGS Steps
class UgsRenderUnrealOpenJobStep(RenderUnrealOpenJobStep):
"""Class for predefined UGS Step"""
default_template_path = settings.UGS_RENDER_STEP_TEMPLATE_DEFAULT_PATH
# Perforce (non UGS) Steps
class P4RenderUnrealOpenJobStep(RenderUnrealOpenJobStep):
default_template_path = settings.P4_RENDER_STEP_TEMPLATE_DEFAULT_PATH
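# Usage sketch (hedged; `step_data_asset` and `mrq_job` are assumed to exist in the
# calling context and are not defined in this module):
#   render_step = RenderUnrealOpenJobStep.from_data_asset(step_data_asset)
#   render_step.mrq_job = mrq_job
#   asset_refs = render_step.get_asset_references()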
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
class ProcessHDA(object):
""" An object that wraps async processing of an HDA (instantiating,
    cooking/processing/baking an HDA), with functions that are called at the
    various stages of the process, that can be overridden by subclasses for
    custom functionality:
- on_failure()
- on_complete(): upon successful completion (could be PostInstantiation
if auto cook is disabled, PostProcessing if auto bake is disabled, or
      after PostAutoBake if auto bake is enabled).
- on_pre_instantiation(): before the HDA is instantiated, a good place
to set parameter values before the first cook.
- on_post_instantiation(): after the HDA is instantiated, a good place
to set/configure inputs before the first cook.
- on_post_auto_cook(): right after a cook
- on_pre_process(): after a cook but before output objects have been
created/processed
- on_post_processing(): after output objects have been created
- on_post_auto_bake(): after outputs have been baked
Instantiate the processor via the constructor and then call the activate()
function to start the asynchronous process.
"""
def __init__(
self,
houdini_asset,
instantiate_at=unreal.Transform(),
parameters=None,
node_inputs=None,
parameter_inputs=None,
world_context_object=None,
spawn_in_level_override=None,
enable_auto_cook=True,
enable_auto_bake=False,
bake_directory_path="",
bake_method=unreal.HoudiniEngineBakeOption.TO_ACTOR,
remove_output_after_bake=False,
recenter_baked_actors=False,
replace_previous_bake=False,
delete_instantiated_asset_on_completion_or_failure=False):
""" Instantiates an HDA in the specified world/level. Sets parameters
        and inputs supplied in ``parameters``, ``node_inputs`` and ``parameter_inputs``.
        If ``enable_auto_cook`` is True, cooks the HDA. If ``enable_auto_bake`` is
        True, bakes the cooked outputs according to the supplied baking
parameters.
This all happens asynchronously, with the various output pins firing at
the various points in the process:
- PreInstantiation: before the HDA is instantiated, a good place
to set parameter values before the first cook (parameter values
from ``parameters`` are automatically applied at this point)
- PostInstantiation: after the HDA is instantiated, a good place
to set/configure inputs before the first cook (inputs from
``node_inputs`` and ``parameter_inputs`` are automatically applied
at this point)
- PostAutoCook: right after a cook
- PreProcess: after a cook but before output objects have been
created/processed
- PostProcessing: after output objects have been created
- PostAutoBake: after outputs have been baked
- Completed: upon successful completion (could be PostInstantiation
if auto cook is disabled, PostProcessing if auto bake is disabled,
or after PostAutoBake if auto bake is enabled).
- Failed: If the process failed at any point.
Args:
houdini_asset (HoudiniAsset): The HDA to instantiate.
instantiate_at (Transform): The Transform to instantiate the HDA with.
parameters (Map(Name, HoudiniParameterTuple)): The parameters to set before cooking the instantiated HDA.
node_inputs (Map(int32, HoudiniPublicAPIInput)): The node inputs to set before cooking the instantiated HDA.
parameter_inputs (Map(Name, HoudiniPublicAPIInput)): The parameter-based inputs to set before cooking the instantiated HDA.
world_context_object (Object): A world context object for identifying the world to spawn in, if spawn_in_level_override is null.
spawn_in_level_override (Level): If not nullptr, then the HoudiniAssetActor is spawned in that level. If both spawn_in_level_override and world_context_object are null, then the actor is spawned in the current editor context world's current level.
enable_auto_cook (bool): If true (the default) the HDA will cook automatically after instantiation and after parameter, transform and input changes.
enable_auto_bake (bool): If true, the HDA output is automatically baked after a cook. Defaults to false.
bake_directory_path (str): The directory to bake to if the bake path is not set via attributes on the HDA output.
bake_method (HoudiniEngineBakeOption): The bake target (to actor vs blueprint). @see HoudiniEngineBakeOption.
remove_output_after_bake (bool): If true, HDA temporary outputs are removed after a bake. Defaults to false.
recenter_baked_actors (bool): Recenter the baked actors to their bounding box center. Defaults to false.
replace_previous_bake (bool): If true, on every bake replace the previous bake's output (assets + actors) with the new bake's output. Defaults to false.
delete_instantiated_asset_on_completion_or_failure (bool): If true, deletes the instantiated asset actor on completion or failure. Defaults to false.
"""
super(ProcessHDA, self).__init__()
self._houdini_asset = houdini_asset
self._instantiate_at = instantiate_at
self._parameters = parameters
self._node_inputs = node_inputs
self._parameter_inputs = parameter_inputs
self._world_context_object = world_context_object
self._spawn_in_level_override = spawn_in_level_override
self._enable_auto_cook = enable_auto_cook
self._enable_auto_bake = enable_auto_bake
self._bake_directory_path = bake_directory_path
self._bake_method = bake_method
self._remove_output_after_bake = remove_output_after_bake
self._recenter_baked_actors = recenter_baked_actors
self._replace_previous_bake = replace_previous_bake
self._delete_instantiated_asset_on_completion_or_failure = delete_instantiated_asset_on_completion_or_failure
self._asset_wrapper = None
self._cook_success = False
self._bake_success = False
@property
def asset_wrapper(self):
""" The asset wrapper for the instantiated HDA processed by this node. """
return self._asset_wrapper
@property
def cook_success(self):
""" True if the last cook was successful. """
return self._cook_success
@property
def bake_success(self):
""" True if the last bake was successful. """
return self._bake_success
@property
def houdini_asset(self):
""" The HDA to instantiate. """
return self._houdini_asset
@property
def instantiate_at(self):
""" The transform the instantiate the asset with. """
return self._instantiate_at
@property
def parameters(self):
""" The parameters to set on on_pre_instantiation """
return self._parameters
@property
def node_inputs(self):
""" The node inputs to set on on_post_instantiation """
return self._node_inputs
@property
def parameter_inputs(self):
""" The object path parameter inputs to set on on_post_instantiation """
return self._parameter_inputs
@property
def world_context_object(self):
""" The world context object: spawn in this world if spawn_in_level_override is not set. """
return self._world_context_object
@property
def spawn_in_level_override(self):
""" The level to spawn in. If both this and world_context_object is not set, spawn in the editor context's level. """
return self._spawn_in_level_override
@property
def enable_auto_cook(self):
""" Whether to set the instantiated asset to auto cook. """
return self._enable_auto_cook
@property
def enable_auto_bake(self):
""" Whether to set the instantiated asset to auto bake after a cook. """
return self._enable_auto_bake
@property
def bake_directory_path(self):
""" Set the fallback bake directory, for if output attributes do not specify it. """
return self._bake_directory_path
@property
def bake_method(self):
""" The bake method/target: for example, to actors vs to blueprints. """
return self._bake_method
@property
def remove_output_after_bake(self):
""" Remove temporary HDA output after a bake. """
return self._remove_output_after_bake
@property
def recenter_baked_actors(self):
""" Recenter the baked actors at their bounding box center. """
return self._recenter_baked_actors
@property
def replace_previous_bake(self):
""" Replace previous bake output on each bake. For the purposes of this
node, this would mostly apply to .uassets and not actors.
"""
return self._replace_previous_bake
@property
def delete_instantiated_asset_on_completion_or_failure(self):
""" Whether or not to delete the instantiated asset after Complete is called. """
return self._delete_instantiated_asset_on_completion_or_failure
def activate(self):
""" Activate the process. This will:
- instantiate houdini_asset and wrap it as asset_wrapper
- call on_failure() for any immediate failures
- otherwise bind to delegates from asset_wrapper so that the
various self.on_*() functions are called as appropriate
Returns immediately (does not block until cooking/processing is
complete).
Returns:
(bool): False if activation failed.
"""
# Get the API instance
houdini_api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
if not houdini_api:
# Handle failures: this will unbind delegates and call on_failure()
self._handle_on_failure()
return False
# Create an empty API asset wrapper
self._asset_wrapper = unreal.HoudiniPublicAPIAssetWrapper.create_empty_wrapper(houdini_api)
if not self._asset_wrapper:
# Handle failures: this will unbind delegates and call on_failure()
self._handle_on_failure()
return False
# Bind to the wrapper's delegates for instantiation, cooking, baking
# etc events
self._asset_wrapper.on_pre_instantiation_delegate.add_callable(
self._handle_on_pre_instantiation)
self._asset_wrapper.on_post_instantiation_delegate.add_callable(
self._handle_on_post_instantiation)
self._asset_wrapper.on_post_cook_delegate.add_callable(
self._handle_on_post_auto_cook)
self._asset_wrapper.on_pre_process_state_exited_delegate.add_callable(
self._handle_on_pre_process)
self._asset_wrapper.on_post_processing_delegate.add_callable(
self._handle_on_post_processing)
self._asset_wrapper.on_post_bake_delegate.add_callable(
self._handle_on_post_auto_bake)
# Begin the instantiation process of houdini_asset and wrap it with
# self.asset_wrapper
if not houdini_api.instantiate_asset_with_existing_wrapper(
self.asset_wrapper,
self.houdini_asset,
self.instantiate_at,
self.world_context_object,
self.spawn_in_level_override,
self.enable_auto_cook,
self.enable_auto_bake,
self.bake_directory_path,
self.bake_method,
self.remove_output_after_bake,
self.recenter_baked_actors,
self.replace_previous_bake):
# Handle failures: this will unbind delegates and call on_failure()
self._handle_on_failure()
return False
return True
def _unbind_delegates(self):
""" Unbinds from self.asset_wrapper's delegates (if valid). """
if not self._asset_wrapper:
return
        self._asset_wrapper.on_pre_instantiation_delegate.remove_callable(
            self._handle_on_pre_instantiation)
        self._asset_wrapper.on_post_instantiation_delegate.remove_callable(
            self._handle_on_post_instantiation)
        self._asset_wrapper.on_post_cook_delegate.remove_callable(
            self._handle_on_post_auto_cook)
        self._asset_wrapper.on_pre_process_state_exited_delegate.remove_callable(
            self._handle_on_pre_process)
        self._asset_wrapper.on_post_processing_delegate.remove_callable(
            self._handle_on_post_processing)
        self._asset_wrapper.on_post_bake_delegate.remove_callable(
            self._handle_on_post_auto_bake)
def _check_wrapper(self, wrapper):
""" Checks that wrapper matches self.asset_wrapper. Logs a warning if
it does not.
Args:
wrapper (HoudiniPublicAPIAssetWrapper): the wrapper to check
against self.asset_wrapper
Returns:
(bool): True if the wrappers match.
"""
if wrapper != self._asset_wrapper:
unreal.log_warning(
'[UHoudiniPublicAPIProcessHDANode] Received delegate event '
'from unexpected asset wrapper ({0} vs {1})!'.format(
self._asset_wrapper.get_name() if self._asset_wrapper else '',
wrapper.get_name() if wrapper else ''
)
)
return False
return True
def _handle_on_failure(self):
""" Handle any failures during the lifecycle of the process. Calls
self.on_failure() and then unbinds from self.asset_wrapper and
optionally deletes the instantiated asset.
"""
self.on_failure()
self._unbind_delegates()
if self.delete_instantiated_asset_on_completion_or_failure and self.asset_wrapper:
self.asset_wrapper.delete_instantiated_asset()
def _handle_on_complete(self):
""" Handles completion of the process. This can happen at one of
three stages:
        - After on_post_instantiation(), if enable_auto_cook is False.
- After on_post_auto_cook(), if enable_auto_cook is True but
enable_auto_bake is False.
- After on_post_auto_bake(), if both enable_auto_cook and
enable_auto_bake are True.
Calls self.on_complete() and then unbinds from self.asset_wrapper's
delegates and optionally deletes the instantiated asset.
"""
self.on_complete()
self._unbind_delegates()
if self.delete_instantiated_asset_on_completion_or_failure and self.asset_wrapper:
self.asset_wrapper.delete_instantiated_asset()
def _handle_on_pre_instantiation(self, wrapper):
""" Called during pre_instantiation. Sets ``parameters`` on the HDA
and calls self.on_pre_instantiation().
"""
if not self._check_wrapper(wrapper):
return
# Set any parameters specified for the HDA
if self.asset_wrapper and self.parameters:
self.asset_wrapper.set_parameter_tuples(self.parameters)
self.on_pre_instantiation()
def _handle_on_post_instantiation(self, wrapper):
""" Called during post_instantiation. Sets inputs (``node_inputs`` and
``parameter_inputs``) on the HDA and calls self.on_post_instantiation().
Completes execution if enable_auto_cook is False.
"""
if not self._check_wrapper(wrapper):
return
# Set any inputs specified when the node was created
if self.asset_wrapper:
if self.node_inputs:
self.asset_wrapper.set_inputs_at_indices(self.node_inputs)
if self.parameter_inputs:
self.asset_wrapper.set_input_parameters(self.parameter_inputs)
self.on_post_instantiation()
# If not set to auto cook, complete execution now
if not self.enable_auto_cook:
self._handle_on_complete()
def _handle_on_post_auto_cook(self, wrapper, cook_success):
""" Called during post_cook. Sets self.cook_success and calls
self.on_post_auto_cook().
Args:
cook_success (bool): True if the cook was successful.
"""
if not self._check_wrapper(wrapper):
return
self._cook_success = cook_success
self.on_post_auto_cook(cook_success)
def _handle_on_pre_process(self, wrapper):
""" Called during pre_process. Calls self.on_pre_process().
"""
if not self._check_wrapper(wrapper):
return
self.on_pre_process()
def _handle_on_post_processing(self, wrapper):
""" Called during post_processing. Calls self.on_post_processing().
Completes execution if enable_auto_bake is False.
"""
if not self._check_wrapper(wrapper):
return
self.on_post_processing()
# If not set to auto bake, complete execution now
if not self.enable_auto_bake:
self._handle_on_complete()
def _handle_on_post_auto_bake(self, wrapper, bake_success):
""" Called during post_bake. Sets self.bake_success and calls
self.on_post_auto_bake().
Args:
bake_success (bool): True if the bake was successful.
"""
if not self._check_wrapper(wrapper):
return
self._bake_success = bake_success
self.on_post_auto_bake(bake_success)
self._handle_on_complete()
def on_failure(self):
""" Called if the process fails to instantiate or fails to start
a cook.
        Subclasses can override this function to implement custom functionality.
"""
pass
def on_complete(self):
""" Called if the process completes instantiation, cook and/or baking,
depending on enable_auto_cook and enable_auto_bake.
        Subclasses can override this function to implement custom functionality.
"""
pass
def on_pre_instantiation(self):
""" Called during pre_instantiation.
        Subclasses can override this function to implement custom functionality.
"""
pass
def on_post_instantiation(self):
""" Called during post_instantiation.
        Subclasses can override this function to implement custom functionality.
"""
pass
def on_post_auto_cook(self, cook_success):
""" Called during post_cook.
        Subclasses can override this function to implement custom functionality.
Args:
cook_success (bool): True if the cook was successful.
"""
pass
def on_pre_process(self):
""" Called during pre_process.
        Subclasses can override this function to implement custom functionality.
"""
pass
def on_post_processing(self):
""" Called during post_processing.
        Subclasses can override this function to implement custom functionality.
"""
pass
def on_post_auto_bake(self, bake_success):
""" Called during post_bake.
        Subclasses can override this function to implement custom functionality.
Args:
bake_success (bool): True if the bake was successful.
"""
pass
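# Usage sketch (kept as a comment because this module is meant to be imported; the asset
# path below is hypothetical):
#
#   class MyProcessHDA(ProcessHDA):
#       def on_post_processing(self):
#           unreal.log("HDA outputs created: {0}".format(self.asset_wrapper.get_name()))
#
#   hda = unreal.load_asset('/Game/HDA/my_hda')
#   processor = MyProcessHDA(hda, enable_auto_cook=True)
#   processor.activate()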
|
import unreal
# Collect the CameraActor instances in the current level
cameras: list[unreal.CameraActor] = [
    actor for actor in unreal.EditorLevelLibrary.get_all_level_actors()
    if isinstance(actor, unreal.CameraActor)
]
sorted_cameras = sorted(cameras, key=lambda camera: (
int(camera.get_actor_label().lower().partition('photospot')[2].partition('_')[0] or
camera.get_actor_label().lower().partition('samplespot')[2].partition('_')[0] or 0),
int(camera.get_actor_label().partition('_')[2] or 0) if '_' in camera.get_actor_label() else 0
))
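# Illustrative check (hypothetical actor labels): "PhotoSpot2_1", "SampleSpot3_2" and
# "PhotoSpot10" sort by the numeric spot index first and the optional "_<n>" suffix
# second, i.e. PhotoSpot2_1, SampleSpot3_2, PhotoSpot10.
for camera in sorted_cameras:
    unreal.log(camera.get_actor_label())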
|
# SPDX-FileCopyrightText: 2018-2025 Xavier Loux (BleuRaven)
#
# SPDX-License-Identifier: GPL-3.0-or-later
# ----------------------------------------------
# Blender For UnrealEngine
# https://github.com/xavier150/Blender-For-UnrealEngine-Addons
# ----------------------------------------------
from typing import Union
import unreal
from . import import_module_unreal_utils
from .asset_types import ExportAssetType, AssetFileTypeEnum
from . import config
if hasattr(unreal, 'InterchangeGenericAssetsPipeline'):
def task_options_default_preset(use_interchange: bool = True) -> Union[unreal.FbxImportUI, unreal.InterchangeGenericAssetsPipeline]:
"""Returns default task options preset based on interchange usage and Unreal version."""
if use_interchange:
options = unreal.InterchangeGenericAssetsPipeline()
else:
options = unreal.FbxImportUI()
return options
def task_options_static_mesh_preset(use_interchange: bool = True) -> Union[unreal.InterchangeGenericAssetsPipeline, unreal.FbxImportUI]:
"""Returns static mesh task options preset based on interchange usage."""
if use_interchange:
options = unreal.InterchangeGenericAssetsPipeline()
else:
options = unreal.FbxImportUI()
return options
def task_options_skeletal_mesh_preset(use_interchange: bool = True) -> Union[unreal.InterchangeGenericAssetsPipeline, unreal.FbxImportUI]:
"""Returns skeletal mesh task options preset based on interchange usage."""
if use_interchange:
options = unreal.InterchangeGenericAssetsPipeline()
else:
options = unreal.FbxImportUI()
return options
def task_options_animation_preset(use_interchange: bool = True) -> Union[unreal.InterchangeGenericAssetsPipeline, unreal.FbxImportUI]:
"""Returns animation task options preset based on interchange usage."""
if use_interchange:
options = unreal.InterchangeGenericAssetsPipeline()
else:
options = unreal.FbxImportUI()
return options
else:
def task_options_default_preset(use_interchange: bool = True) -> unreal.FbxImportUI:
"""Returns default task options preset for Unreal Engine versions below 5, without interchange support."""
return unreal.FbxImportUI()
def task_options_static_mesh_preset(use_interchange: bool = True) -> unreal.FbxImportUI:
"""Returns static mesh task options preset without interchange support."""
return unreal.FbxImportUI()
def task_options_skeletal_mesh_preset(use_interchange: bool = True) -> unreal.FbxImportUI:
"""Returns skeletal mesh task options preset without interchange support."""
return unreal.FbxImportUI()
def task_options_animation_preset(use_interchange: bool = True) -> unreal.FbxImportUI:
"""Returns animation task options preset without interchange support."""
return unreal.FbxImportUI()
if import_module_unreal_utils.alembic_importer_active():
# Add the function only if alembic importer is active
def task_options_alembic_preset(use_interchange: bool = True) -> unreal.AbcImportSettings:
"""Returns Alembic task options preset."""
options = unreal.AbcImportSettings()
return options
if hasattr(unreal, 'InterchangeGenericAssetsPipeline'):
def init_options_data(asset_type: ExportAssetType, filetype: AssetFileTypeEnum):
"""Initializes task options based on asset type and interchange usage."""
        # For FBX files it is better not to use Interchange before UE 5.5.
if config.force_use_interchange:
use_interchange = True
elif filetype.value == AssetFileTypeEnum.FBX.value and import_module_unreal_utils.get_unreal_version() < (5,5,0):
use_interchange = False
else:
use_interchange = True
        # Use the Alembic preset only if the Alembic importer is active
if asset_type.value == ExportAssetType.ANIM_ALEMBIC.value and import_module_unreal_utils.alembic_importer_active():
options = task_options_alembic_preset(use_interchange)
elif asset_type.value == ExportAssetType.STATIC_MESH.value:
options = task_options_static_mesh_preset(use_interchange)
elif asset_type.value == ExportAssetType.SKELETAL_MESH.value:
options = task_options_skeletal_mesh_preset(use_interchange)
elif asset_type.is_skeletal_animation():
options = task_options_animation_preset(use_interchange)
else:
options = task_options_default_preset(use_interchange)
return options
else:
def init_options_data(asset_type: ExportAssetType, filetype: AssetFileTypeEnum):
"""Initializes task options based on asset type and interchange usage."""
        # Use the Alembic preset only if the Alembic importer is active
if asset_type.value == ExportAssetType.ANIM_ALEMBIC.value and import_module_unreal_utils.alembic_importer_active():
options = task_options_alembic_preset(False)
elif asset_type.value == ExportAssetType.STATIC_MESH.value:
options = task_options_static_mesh_preset(False)
elif asset_type.value == ExportAssetType.SKELETAL_MESH.value:
options = task_options_skeletal_mesh_preset(False)
elif asset_type.is_skeletal_animation():
options = task_options_animation_preset(False)
else:
options = task_options_default_preset(False)
return options
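# Usage sketch (hedged; ExportAssetType and AssetFileTypeEnum members come from the
# imports at the top of this module):
#   options = init_options_data(ExportAssetType.STATIC_MESH, AssetFileTypeEnum.FBX)
#   # `options` is either an InterchangeGenericAssetsPipeline or an FbxImportUI,
#   # depending on the engine version and config.force_use_interchange.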
|
# -*- coding: utf-8 -*-
"""Load FBX with animations."""
import json
import os
import ayon_api
import unreal
from ayon_core.pipeline import (AYON_CONTAINER_ID,
get_current_project_name,
load_container,
discover_loader_plugins,
loaders_from_representation)
from ayon_core.pipeline.context_tools import get_current_folder_entity
from ayon_core.pipeline.load import LoadError
from ayon_unreal.api import pipeline as unreal_pipeline
from ayon_unreal.api import plugin
from unreal import (EditorAssetLibrary, MovieSceneSkeletalAnimationSection,
MovieSceneSkeletalAnimationTrack)
class AnimationFBXLoader(plugin.Loader):
"""Load Unreal SkeletalMesh from FBX."""
product_types = {"animation"}
label = "Import FBX Animation"
representations = {"fbx"}
icon = "cube"
color = "orange"
root = unreal_pipeline.AYON_ROOT_DIR
loaded_asset_dir = "{folder[path]}/{product[name]}_{version[version]}"
loaded_asset_name = "{folder[name]}_{product[name]}_{version[version]}_{representation[name]}" # noqa
show_dialog = False
@classmethod
def apply_settings(cls, project_settings):
super(AnimationFBXLoader, cls).apply_settings(project_settings)
# Apply import settings
unreal_settings = project_settings["unreal"]["import_settings"]
cls.loaded_asset_dir = unreal_settings["loaded_asset_dir"]
cls.loaded_asset_name = unreal_settings["loaded_asset_name"]
cls.show_dialog = unreal_settings["show_dialog"]
def _import_latest_skeleton(self, version_ids):
version_ids = set(version_ids)
project_name = get_current_project_name()
repre_entities = ayon_api.get_representations(
project_name,
representation_names={"fbx"},
version_ids=version_ids,
fields={"id"}
)
repre_entity = next(
(repre_entity for repre_entity
in repre_entities), None)
if not repre_entity:
raise LoadError(
f"No valid representation for version {version_ids}")
repre_id = repre_entity["id"]
target_loader = None
all_loaders = discover_loader_plugins()
loaders = loaders_from_representation(
all_loaders, repre_id)
for loader in loaders:
if loader.__name__ == "SkeletalMeshFBXLoader":
target_loader = loader
assets = load_container(
target_loader,
repre_id,
namespace=None,
options={}
)
return assets
@classmethod
def _import_animation(
cls, path, asset_dir, asset_name,
skeleton, automated, replace=False,
loaded_options=None
):
task = unreal.AssetImportTask()
task.options = unreal.FbxImportUI()
folder_entity = get_current_folder_entity(fields=["attrib.fps"])
task.set_editor_property('filename', path)
task.set_editor_property('destination_path', asset_dir)
task.set_editor_property('destination_name', asset_name)
task.set_editor_property('replace_existing', replace)
task.set_editor_property('automated', not cls.show_dialog)
task.set_editor_property('save', False)
# set import options here
task.options.set_editor_property(
'automated_import_should_detect_type', True)
task.options.set_editor_property(
'original_import_type', unreal.FBXImportType.FBXIT_SKELETAL_MESH)
task.options.set_editor_property(
'mesh_type_to_import', unreal.FBXImportType.FBXIT_ANIMATION)
task.options.set_editor_property('import_mesh', False)
task.options.set_editor_property('import_animations', True)
task.options.set_editor_property('override_full_name', True)
task.options.set_editor_property('skeleton', skeleton)
task.options.anim_sequence_import_data.set_editor_property(
'animation_length',
unreal.FBXAnimationLengthImportType.FBXALIT_SET_RANGE
)
task.options.anim_sequence_import_data.set_editor_property(
'frame_import_range', unreal.Int32Interval(
min=loaded_options.get("frameStart"),
max=loaded_options.get("frameEnd")
))
task.options.anim_sequence_import_data.set_editor_property(
'import_meshes_in_bone_hierarchy', False)
task.options.anim_sequence_import_data.set_editor_property(
'use_default_sample_rate', False)
task.options.anim_sequence_import_data.set_editor_property(
'custom_sample_rate', folder_entity.get("attrib", {}).get("fps"))
task.options.anim_sequence_import_data.set_editor_property(
'import_custom_attribute', True)
task.options.anim_sequence_import_data.set_editor_property(
'import_bone_tracks', True)
task.options.anim_sequence_import_data.set_editor_property(
'remove_redundant_keys', False)
task.options.anim_sequence_import_data.set_editor_property(
'convert_scene', True)
task.options.anim_sequence_import_data.set_editor_property(
'force_front_x_axis', False)
if unreal_pipeline.UNREAL_VERSION.major == 5 and (
unreal_pipeline.UNREAL_VERSION.minor <=4
):
task.options.anim_sequence_import_data.set_editor_property(
'import_rotation',
unreal.Rotator(roll=90.0, pitch=0.0, yaw=0.0)
)
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
def _process(self, path, asset_dir, asset_name,
instance_name, loaded_options=None):
automated = False
actor = None
if instance_name:
automated = True
# Old method to get the actor
# actor_name = 'PersistentLevel.' + instance_name
# actor = unreal.EditorLevelLibrary.get_actor_reference(actor_name)
actors = unreal.EditorLevelLibrary.get_all_level_actors()
for a in actors:
if a.get_class().get_name() != "SkeletalMeshActor":
continue
if a.get_actor_label() == instance_name:
actor = a
break
if not actor:
raise LoadError(f"Could not find actor {instance_name}")
skeleton = actor.skeletal_mesh_component.skeletal_mesh.skeleton
if not actor:
return None
self._import_animation(
path, asset_dir, asset_name,
skeleton, automated, loaded_options=loaded_options)
asset_content = EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
animation = None
for a in asset_content:
imported_asset_data = EditorAssetLibrary.find_asset_data(a)
imported_asset = unreal.AssetRegistryHelpers.get_asset(
imported_asset_data)
if imported_asset.__class__ == unreal.AnimSequence:
animation = imported_asset
break
if animation:
animation.set_editor_property('enable_root_motion', True)
actor.skeletal_mesh_component.set_editor_property(
'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE)
actor.skeletal_mesh_component.animation_data.set_editor_property(
'anim_to_play', animation)
return animation
def _load_from_json(
self, libpath, path, asset_dir, asset_name, hierarchy_dir,
loaded_options=None
):
with open(libpath, "r") as fp:
data = json.load(fp)
instance_name = data.get("instance_name")
animation = self._process(
path, asset_dir, asset_name,
instance_name, loaded_options=loaded_options)
asset_content = EditorAssetLibrary.list_assets(
hierarchy_dir, recursive=True, include_folder=False)
# Get the sequence for the layout, excluding the camera one.
sequences = [a for a in asset_content
if (EditorAssetLibrary.find_asset_data(a).get_class() ==
unreal.LevelSequence.static_class() and
"_camera" not in a.split("/")[-1])]
ar = unreal.AssetRegistryHelpers.get_asset_registry()
for s in sequences:
sequence = ar.get_asset_by_object_path(s).get_asset()
possessables = [
p for p in sequence.get_possessables()
if p.get_display_name() == instance_name]
for p in possessables:
tracks = [
t for t in p.get_tracks()
if (t.get_class() ==
MovieSceneSkeletalAnimationTrack.static_class())]
for t in tracks:
sections = [
s for s in t.get_sections()
if (s.get_class() ==
MovieSceneSkeletalAnimationSection.static_class())]
for section in sections:
section.params.set_editor_property('animation', animation)
@staticmethod
def is_skeleton(asset):
return asset.get_class() == unreal.Skeleton.static_class()
def _load_standalone_animation(
self, path, asset_dir, asset_name,
version_id, loaded_options=None
):
selection = unreal.EditorUtilityLibrary.get_selected_assets()
skeleton = None
if selection:
skeleton = selection[0]
if not self.is_skeleton(skeleton):
self.log.warning(
f"Selected asset {skeleton.get_name()} is not "
f"a skeleton. It is {skeleton.get_class().get_name()}")
skeleton = None
print("Trying to find original rig with links.")
# If no skeleton is selected, we try to find the skeleton by
# checking linked rigs.
project_name = get_current_project_name()
server = ayon_api.get_server_api_connection()
v_links = server.get_version_links(
project_name, version_id=version_id)
entities = [v_link["entityId"] for v_link in v_links]
linked_versions = list(server.get_versions(project_name, entities))
rigs = [
version["id"] for version in linked_versions
if "rig" in version["attrib"]["families"]]
self.log.debug(f"Found rigs: {rigs}")
ar = unreal.AssetRegistryHelpers.get_asset_registry()
containers = unreal_pipeline.ls()
for container in containers:
self.log.debug(f"Checking container: {container}")
if container["parent"] in rigs:
unreal.log("{}".format(container["parent"]))
                # we found a loaded version of the linked rig
if container["loader"] != "SkeletalMeshFBXLoader":
continue
namespace = container["namespace"]
_filter = unreal.ARFilter(
class_names=["Skeleton"],
package_paths=[namespace],
recursive_paths=False)
if skeletons := ar.get_assets(_filter):
skeleton = skeletons[0].get_asset()
break
# Check if the asset exists in the content plugin
_content_filter = (
self._get_content_plugin_filter(ar, namespace)
)
if skeletons := ar.get_assets(_content_filter):
skeleton = skeletons[0].get_asset()
break
if not skeleton:
ar = unreal.AssetRegistryHelpers.get_asset_registry()
skeleton_asset = self._import_latest_skeleton(rigs)
for asset in skeleton_asset:
obj = ar.get_asset_by_object_path(asset).get_asset()
if obj.get_class().get_name() == 'Skeleton':
skeleton = obj
if not self.is_skeleton(skeleton):
raise LoadError("Selected asset is not a skeleton.")
self.log.info(f"Using skeleton: {skeleton.get_name()}")
self._import_animation(
path, asset_dir, asset_name,
skeleton, True, loaded_options=loaded_options
)
def _get_content_plugin_filter(self, asset_registry, namespace):
asset_template = namespace.replace("/Game", "")
for package in asset_registry.get_all_assets():
package_dir = str(package.package_path)
if asset_template in package_dir:
_content_plugin_filter = unreal.ARFilter(
class_names=["Skeleton"],
package_paths=[package_dir],
recursive_paths=False)
return _content_plugin_filter
return None
def _import_animation_with_json(self, path, context, hierarchy,
asset_dir, folder_name,
asset_name, loaded_options=None):
libpath = path.replace(".fbx", ".json")
master_level = None
if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir):
EditorAssetLibrary.make_directory(asset_dir)
# check if json file exists.
if os.path.exists(libpath):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
_filter = unreal.ARFilter(
class_names=["World"],
package_paths=[f"{self.root}/{hierarchy[0]}"],
recursive_paths=False)
levels = ar.get_assets(_filter)
master_level = levels[0].get_asset().get_path_name()
hierarchy_dir = self.root
for h in hierarchy:
hierarchy_dir = f"{hierarchy_dir}/{h}"
hierarchy_dir = f"{hierarchy_dir}/{folder_name}"
_filter = unreal.ARFilter(
class_names=["World"],
package_paths=[f"{hierarchy_dir}/"],
recursive_paths=True)
levels = ar.get_assets(_filter)
level = levels[0].get_asset().get_path_name()
unreal.EditorLevelLibrary.save_all_dirty_levels()
unreal.EditorLevelLibrary.load_level(level)
self._load_from_json(
libpath, path, asset_dir, asset_name, hierarchy_dir)
else:
should_use_layout = loaded_options.get("layout", False)
if should_use_layout and (
existing_asset_dir := (
unreal_pipeline.get_dir_from_existing_asset(
asset_dir, asset_name)
)
):
asset_dir = existing_asset_dir
else:
version_id = context["representation"]["versionId"]
if not unreal.EditorAssetLibrary.does_asset_exist(
f"{asset_dir}/{asset_name}"):
self._load_standalone_animation(
path, asset_dir, asset_name,
version_id, loaded_options)
return master_level, asset_dir
def imprint(
self,
folder_path,
asset_dir,
container_name,
asset_name,
representation,
product_type,
folder_entity,
project_name,
layout
):
data = {
"schema": "ayon:container-2.0",
"id": AYON_CONTAINER_ID,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": representation["id"],
"parent": representation["versionId"],
"folder_path": folder_path,
"product_type": product_type,
            # TODO these should probably be removed
"asset": folder_path,
"family": product_type,
"frameStart": folder_entity["attrib"]["frameStart"],
"frameEnd": folder_entity["attrib"]["frameEnd"],
"project_name": project_name,
"layout": layout
}
unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data)
def load(self, context, name, namespace, options):
"""
Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
directory and then it will create AssetContainer there and imprint it
with metadata. This will mark this path as container.
Args:
context (dict): application context
name (str): Product name
namespace (str): in Unreal this is basically path to container.
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
data (dict): Those would be data to be imprinted. This is not used
now, data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
# Create directory for asset and Ayon container
folder_entity = context["folder"]
folder_path = folder_entity["path"]
hierarchy = folder_path.lstrip("/").split("/")
folder_name = hierarchy.pop(-1)
product_type = context["product"]["productType"]
suffix = "_CON"
path = self.filepath_from_context(context)
asset_root, asset_name = unreal_pipeline.format_asset_directory(
context, self.loaded_asset_dir, self.loaded_asset_name
)
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
asset_root, suffix="")
container_name += suffix
should_use_layout = options.get("layout", False)
loaded_options = {
"frameStart": folder_entity["attrib"]["frameStart"],
"frameEnd": folder_entity["attrib"]["frameEnd"],
"layout": should_use_layout
}
master_level, asset_dir = self._import_animation_with_json(
path, context, hierarchy,
asset_dir, folder_name, asset_name,
loaded_options=loaded_options
)
if not unreal.EditorAssetLibrary.does_asset_exist(
f"{asset_dir}/{container_name}"):
unreal_pipeline.create_container(
container=container_name, path=asset_dir)
self.imprint(
folder_path,
asset_dir,
container_name,
asset_name,
context["representation"],
product_type,
folder_entity,
context["project"]["name"],
should_use_layout
)
imported_content = EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=False)
for asset in imported_content:
loaded_asset = EditorAssetLibrary.load_asset(asset)
# Enable root motion for animations so they are oriented correctly
if loaded_asset.get_class() == unreal.AnimSequence.static_class():
loaded_asset.set_editor_property("enable_root_motion", True)
loaded_asset.set_editor_property(
"root_motion_root_lock",
unreal.RootMotionRootLock.ANIM_FIRST_FRAME)
EditorAssetLibrary.save_asset(asset)
if master_level:
unreal.EditorLevelLibrary.save_current_level()
unreal.EditorLevelLibrary.load_level(master_level)
def update(self, container, context):
# Create directory for folder and Ayon container
folder_path = context["folder"]["path"]
hierarchy = folder_path.lstrip("/").split("/")
folder_name = hierarchy.pop(-1)
folder_name = context["folder"]["name"]
product_type = context["product"]["productType"]
repre_entity = context["representation"]
folder_entity = context["folder"]
suffix = "_CON"
source_path = self.filepath_from_context(context)
asset_root, asset_name = unreal_pipeline.format_asset_directory(
context, self.loaded_asset_dir, self.loaded_asset_name
)
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
asset_root, suffix="")
container_name += suffix
should_use_layout = container.get("layout", False)
loaded_options = {
"frameStart": folder_entity["attrib"]["frameStart"],
"frameEnd": folder_entity["attrib"]["frameEnd"],
"layout": should_use_layout
}
master_level, asset_dir = self._import_animation_with_json(
source_path, context, hierarchy,
asset_dir, folder_name, asset_name,
loaded_options=loaded_options
)
if not unreal.EditorAssetLibrary.does_asset_exist(
f"{asset_dir}/{container_name}"):
# Create Asset Container
unreal_pipeline.create_container(
container=container_name, path=asset_dir
)
# update metadata
self.imprint(
folder_path,
asset_dir,
container_name,
asset_name,
repre_entity,
product_type,
folder_entity,
context["project"]["name"],
should_use_layout
)
asset_content = EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
for a in asset_content:
EditorAssetLibrary.save_asset(a)
if master_level:
unreal.EditorLevelLibrary.save_current_level()
unreal.EditorLevelLibrary.load_level(master_level)
return asset_content
def remove(self, container):
path = container["namespace"]
if unreal.EditorAssetLibrary.does_directory_exist(path):
unreal.EditorAssetLibrary.delete_directory(path)
|
import unreal
# lala = unreal.TakeMetaData()
# lala.unlock()
# lala.set_slate("lala")
# lala.set_take_number(1)
# print(lala.get_slate())
# tk = unreal.TakeRecorderBlueprintLibrary()
# unreal.TakeRecorderBlueprintLibrary.open_take_recorder_panel()
# panel = unreal.TakeRecorderBlueprintLibrary.get_take_recorder_panel()
# metadata = panel.get_take_meta_data()
# sourcelist = panel.get_sources()
# levelsequence = panel.get_level_sequence()
# # trp = unreal.TakeRecorderPanel()
# # print(trp.get_take_meta_data())
# tkparama = unreal.TakeRecorderProjectParameters()
# dirpath = unreal.DirectoryPath()
# dirpath.path = "/Game"
# tkparama.default_slate = "lala"
# tkparama.root_take_save_dir = dirpath
# tkparam = unreal.TakeRecorderParameters()
# # tkparam.set_editor_properties({"default_slate": "lala"})
# # tkparam.set_editor_properties({"root_take_save_dir": "/Game"})
# # tk.start_recording(levelsequence, sourcelist, metadata, tkparam)
# # tkSimple = unreal.TakeRecorderPanel()
# # tkSimple.start_recording()
# #start record
class TakeRecordera:
def __init__(self):
unreal.TakeRecorderBlueprintLibrary.open_take_recorder_panel()
self.start_recording = (
unreal.TakeRecorderBlueprintLibrary.start_recording
)
self.get_take_recorder_panel = (
unreal.TakeRecorderBlueprintLibrary.get_take_recorder_panel()
)
self.get_level_sequence = (
self.get_take_recorder_panel.get_level_sequence()
)
self.get_sources = (
self.get_take_recorder_panel.get_sources()
)
self.metadata = (
self.get_take_recorder_panel.get_take_meta_data()
)
        # Look up TakeMetaData attached to the level sequence (if any)
        self.get_level_sequence.find_meta_data_by_class(unreal.TakeMetaData)
# Example usage
self.projectParameters = unreal.TakeRecorderProjectParameters(
default_slate="lala",
root_take_save_dir=unreal.DirectoryPath(path="/Game"),
take_save_dir="/Game",
)
self.parameters = unreal.TakeRecorderParameters()
self.start_recording(self.get_level_sequence, self.get_sources, self.metadata, self.parameters)
TakeRecordera()
|
"""
This script fixes decals and overlays in the level by updating their materials.
It searches for all actors in the level whose names start with "info_overlay_".
For each matching actor, it retrieves the Static Mesh Component and updates the materials to add an alpha channel.
The materials are set to use the BLEND_TRANSLUCENT blend mode, and an alpha channel is added to the base color texture.
Note:
- This script assumes that the Common module is available and provides the get_component_by_class function.
- The materials that are modified are saved and recompiled.
Example:
To fix decals and overlays in the level, call the `fix_decals_overlays` function without any arguments.
fix_decals_overlays()
"""
import unreal
from Common import get_component_by_class
def fix_decals_overlays():
materials = list()
for actor in unreal.EditorLevelLibrary.get_all_level_actors():
name: str = actor.get_name()
if not name.startswith("info_overlay_"):
continue
print(f"Setup decals for: {actor.get_name()}")
try:
component = get_component_by_class(actor, unreal.StaticMeshComponent)
except KeyError:
continue
for index in range(component.get_num_materials()):
material = component.get_material(index)
assert isinstance(material, unreal.Material), \
f"Material required, not a material interface for {material.get_name()}"
material.set_editor_property("blend_mode", unreal.BlendMode.BLEND_TRANSLUCENT)
texture = unreal.MaterialEditingLibrary.get_material_property_input_node(
material, unreal.MaterialProperty.MP_BASE_COLOR)
status = unreal.MaterialEditingLibrary.connect_material_property(
texture, "A", unreal.MaterialProperty.MP_OPACITY)
unreal.MaterialEditingLibrary.recompile_material(material)
print(f" -> Add alpha channel to material: '{material.get_name()}' status={status}")
materials.append(material)
unreal.EditorAssetLibrary.save_loaded_assets(materials, only_if_is_dirty=False)
if __name__ == '__main__':
fix_decals_overlays()
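# Hypothetical sketch (not the actual Common module): one way the assumed
# get_component_by_class helper could be implemented, assuming it raises
# KeyError when the actor has no matching component, as the try/except in
# fix_decals_overlays expects.
def _sketch_get_component_by_class(actor, component_class):
    """Return the first component of component_class on actor, or raise KeyError."""
    components = actor.get_components_by_class(component_class)
    if not components:
        raise KeyError(f"{actor.get_name()} has no {component_class.__name__} component")
    return components[0]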
|
# /project/
# @CBgameDev Optimisation Script - Log Materials With Missing Physical Materials
# /project/
import unreal
import os
EditAssetLib = unreal.EditorAssetLibrary()
SystemsLib = unreal.SystemLibrary
workingPath = "/Game/" # Using the root directory
notepadFilePath = os.path.join(os.path.dirname(__file__), "PythonOptimiseLog.txt")
allAssets = EditAssetLib.list_assets(workingPath, True, False)
selectedAssetsPath = workingPath
LogStringsArray = []
numOfOptimisations = 0
with unreal.ScopedSlowTask(len(allAssets), selectedAssetsPath) as ST:
ST.make_dialog(True)
for asset in allAssets:
_assetData = EditAssetLib.find_asset_data(asset)
_assetName = _assetData.get_asset().get_name()
_assetPathName = _assetData.get_asset().get_path_name()
_assetClassName = _assetData.get_asset().get_class().get_name()
if _assetClassName == "Material" or _assetClassName == "MaterialInstanceConstant":
asset_obj = EditAssetLib.load_asset(asset)
StringToCheck = _assetData.get_asset().get_editor_property("phys_material") # asset_obj.get_editor_property("phys_material")
# unreal.log("Used Phys Mat = %s" % StringToCheck)
if not SystemsLib.is_valid(StringToCheck):
LogStringsArray.append(" %s ------------> At Path: %s \n" % (_assetName, _assetPathName))
# unreal.log("Asset Name: %s Path: %s \n" % (_assetName, _assetPathName))
numOfOptimisations += 1
if ST.should_cancel():
break
ST.enter_progress_frame(1, asset)
# Write results into a log file
# /project/
TitleOfOptimisation = "Log Materials With Missing Physical Materials"
DescOfOptimisation = "Searches the entire project for materials that don't have a phys mats plugged in"
SummaryMessageIntro = "-- Materials Without Phys Mats Plugged In --"
if unreal.Paths.file_exists(notepadFilePath): # Check if txt file already exists
os.remove(notepadFilePath) # if does remove it
# Create new txt file and run intro text
file = open(notepadFilePath, "a+") # we should only do this if we have a count?
file.write("OPTIMISING SCRIPT by @CBgameDev \n")
file.write("==================================================================================================== \n")
file.write(" SCRIPT NAME: %s \n" % TitleOfOptimisation)
file.write(" DESCRIPTION: %s \n" % DescOfOptimisation)
file.write("==================================================================================================== \n \n")
if numOfOptimisations <= 0:
file.write(" -- NONE FOUND -- \n \n")
else:
for i in range(len(LogStringsArray)):
file.write(LogStringsArray[i])
# Run summary text
file.write("\n")
file.write("======================================================================================================= \n")
file.write(" SUMMARY: \n")
file.write(" %s \n" % SummaryMessageIntro)
file.write(" Found: %s \n \n" % numOfOptimisations)
file.write("======================================================================================================= \n")
file.write(" Logged to %s \n" % notepadFilePath)
file.write("======================================================================================================= \n")
file.close()
os.startfile(notepadFilePath) # Trigger the notepad file to open
|
"""Handles batch processing of animation files."""
import unreal
from PySide6 import QtCore
from .body_processor import BodyProcessor
from ...utils.logging_config import logger
class BodyBatchProcessor(QtCore.QObject):
"""Manages batch processing of animation files."""
progress_updated = QtCore.Signal(int, str, str, str)
processing_finished = QtCore.Signal()
error_occurred = QtCore.Signal(str)
def __init__(self):
"""Initialize the batch processor."""
super().__init__()
self.processor = BodyProcessor()
self.animation_files = []
self.current_index = 0
def run_batch_process(self, file_paths: list, character_name: str) -> None:
"""
Start batch processing of animation files.
Args:
file_paths: List of animation file paths to process
character_name: Target character name
"""
try:
self.animation_files = file_paths
self.current_index = 0
if not self.animation_files:
logger.error("No animation files provided for processing")
return
logger.info(f"Beginning batch process for {len(self.animation_files)} files")
QtCore.QTimer.singleShot(0, lambda: self._process_next_file(character_name))
except Exception as e:
logger.error(f"Batch process initialization failed: {str(e)}")
self.error_occurred.emit(str(e))
def _process_next_file(self, character_name: str) -> None:
"""Process the next animation file in the queue."""
try:
if self.current_index >= len(self.animation_files):
self.processing_finished.emit()
return
current_file = self.animation_files[self.current_index]
self._update_progress("Processing", current_file)
sequence = self.processor.process_asset(current_file, character_name)
self._handle_processing_result(sequence, current_file)
self.current_index += 1
QtCore.QTimer.singleShot(0, lambda: self._process_next_file(character_name))
except Exception as e:
logger.error(f"File processing error: {str(e)}")
self._update_progress("Failed", str(e))
self.current_index += 1
QtCore.QTimer.singleShot(0, lambda: self._process_next_file(character_name))
def _update_progress(self, status: str, message: str) -> None:
"""Update processing progress."""
self.progress_updated.emit(
self.current_index,
f"{status} {self.current_index + 1}/{len(self.animation_files)}",
message,
'processing'
)
def _handle_processing_result(self, sequence, file_path: str) -> None:
"""Handle the result of processing an animation file."""
if sequence:
self._update_progress("Complete", f"Processed: {sequence.get_name()}")
else:
self._update_progress("Failed", f"Failed to process: {file_path}")
|
import unreal
# Create all assets and objects we'll use
lvs = unreal.VariantManagerLibrary.create_level_variant_sets_asset("LVS", "/Game/")
lvs_actor = unreal.VariantManagerLibrary.create_level_variant_sets_actor(lvs)
if lvs is None or lvs_actor is None:
print "Failed to spawn either the LevelVariantSets asset or the LevelVariantSetsActor!"
quit()
# Create a variant set and add it to lvs
var_set1 = unreal.VariantSet()
var_set1.set_display_text("My VariantSet")
lvs.add_variant_set(var_set1)
# Create a variant and add it to var_set1
var1 = unreal.Variant()
var1.set_display_text("Variant 1")
var_set1.add_variant(var1)
# Create a test actor and add it to var1. The test actor has almost all possible types of capturable properties
location = unreal.Vector()
rotation = unreal.Rotator()
test_actor = unreal.EditorLevelLibrary.spawn_actor_from_class(unreal.VariantManagerTestActor, location, rotation)
var1.add_actor_binding(test_actor)
capturable_props = unreal.VariantManagerLibrary.get_capturable_properties(test_actor)
captured_props = []
print "Capturable properties for actor '" + test_actor.get_actor_label() + "':"
for prop in capturable_props:
print "\t" + prop
# All test properties are named like 'Captured____Property'
# The check here avoids capturing generic Actor properties like 'Can be Damaged'
if str(prop).startswith('Captured') and str(prop).endswith('Property'):
new_prop = var1.capture_property(test_actor, prop)
captured_props.append(new_prop)
for prop in captured_props:
type_str = prop.get_property_type_string()
# Set a value for a property depending on its type
if type_str == "bool":
prop.set_value_bool(True)
elif type_str == "int":
prop.set_value_int(2)
elif type_str == "float":
prop.set_value_float(2.0)
elif type_str == "object":
cube = unreal.EditorAssetLibrary.load_asset("StaticMesh'/project/.Cube'")
prop.set_value_object(cube)
elif type_str == "strint":
prop.set_value_string("new string")
elif type_str == "rotator":
prop.set_value_rotator(unreal.Rotator(11, 12, 13))
elif type_str == "color":
prop.set_value_color(unreal.Color(21, 22, 23, 24))
elif type_str == "linear_color":
prop.set_value_linear_color(unreal.LinearColor(0.31, 0.32, 0.33, 0.34))
elif type_str == "vector":
prop.set_value_vector(unreal.Vector(41, 42, 43))
elif type_str == "quat":
prop.set_value_quat(unreal.Quat(0.51, 0.52, 0.53, 0.54))
elif type_str == "vector4":
prop.set_value_vector4(unreal.Vector4(6.1, 6.2, 6.3, 6.4))
elif type_str == "Vector2D":
prop.set_value_vector2d(unreal.Vector2D(7.1, 7.2))
elif type_str == "int_Point":
prop.set_value_int_point(unreal.IntPoint(81, 82))
# Easier to print using getattr
for prop in captured_props:
type_str = prop.get_property_type_string()
print(getattr(prop, "get_value_" + type_str)())
|
import unreal
import os
from tests.test_config import *
def create_test_directory(path):
"""Create a test directory if it doesn't exist."""
if not unreal.EditorAssetLibrary.does_directory_exist(path):
unreal.EditorAssetLibrary.make_directory(path)
def cleanup_test_assets(prefix=TEST_ASSET_PREFIX):
"""Clean up test assets with the given prefix."""
if not CLEANUP_TEST_ASSETS:
return
# Get all assets in test directories
test_paths = [
TEST_WIDGET_PATH,
TEST_ANIMATION_PATH,
TEST_SOUND_PATH
]
for path in test_paths:
if unreal.EditorAssetLibrary.does_directory_exist(path):
assets = unreal.EditorAssetLibrary.list_assets(path, recursive=True)
for asset in assets:
if os.path.basename(asset).startswith(prefix):
unreal.EditorAssetLibrary.delete_asset(asset)
def get_test_skeleton():
"""Get a reference to a test skeleton."""
return unreal.EditorAssetLibrary.load_asset("/project/")
def get_test_sound_file():
"""Get a reference to a test sound file."""
return unreal.EditorAssetLibrary.load_asset(TEST_SOUND_FILE)
def create_test_world():
"""Create a test world for testing gameplay functionality."""
world = unreal.EditorLevelLibrary.get_editor_world()
if not world:
world = unreal.EditorLevelLibrary.new_level("/project/")
return world
def cleanup_test_world():
"""Clean up the test world."""
world = unreal.EditorLevelLibrary.get_editor_world()
if world:
actors = unreal.EditorLevelLibrary.get_all_level_actors()
for actor in actors:
if actor.get_name().startswith(TEST_ASSET_PREFIX):
unreal.EditorLevelLibrary.destroy_actor(actor)
def setup_test_environment():
"""Set up the test environment."""
# Create test directories
create_test_directory(TEST_PACKAGE_PATH)
create_test_directory(TEST_WIDGET_PATH)
create_test_directory(TEST_ANIMATION_PATH)
create_test_directory(TEST_SOUND_PATH)
def teardown_test_environment():
"""Clean up the test environment."""
if CLEANUP_TEST_ASSETS:
cleanup_test_assets()
cleanup_test_world()
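# Hedged example (not part of the original helpers): a typical flow a test
# script might follow using these utilities; the TEST_* values come from
# tests.test_config via the star import above.
if __name__ == "__main__":
    setup_test_environment()
    try:
        skeleton = get_test_skeleton()
        unreal.log(f"Loaded test skeleton: {skeleton}")
    finally:
        teardown_test_environment()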
|
#!/project/ python
# -*- coding: utf-8 -*-
#
# importAsset.py
# @Author : ()
# @Link :
# @Date : 2019/project/ 10:14:53 PM
# Import an asset (can be an asset of any type)
# execfile(r'/project/.py')
import unreal
import os
# Source path of the asset content to import
asset_path = '/project/.FBX'
print(asset_path)
# Destination path for the imported asset content
destination_path = '/project/'
# Build an import task
# filename: str : path of the source file to import
# destination_path: str : content path where the asset will be created
# options: obj : import options object. Can be None for assets that normally
#                have no import dialog (e.g. sounds, textures).
# return: obj : The import task object
def buildImportTask(filename='', destination_path='', options=None):
    # https://docs.unrealengine.com/en-US/project/.html?highlight=assetimporttask
    task = unreal.AssetImportTask()  # Data for a group of assets to import
    task.automated = True  # Avoid dialogs
    task.destination_name = ''  # Optional custom name to import as
    task.destination_path = destination_path  # Content path in the project where the asset will be imported
    task.filename = filename  # Filename of the file to import
    task.replace_existing = True  # Overwrite existing assets
    task.options = options  # (Object) - [Read-Write] Import options specific to the asset type
    task.save = True  # Save after importing
    # task.imported_object_paths  # (Array(str)) - [Read-Write] Paths of objects created or updated by the import
return task
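# Hedged example: for assets that normally show no import dialog (e.g. textures
# or sounds), the task can be built with options=None; the paths below are
# placeholders, not real project files.
# texture_task = buildImportTask('C:/temp/T_Example.png', '/Game/Textures', options=None)
# executeImportTasks([texture_task])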
# Build static mesh import options
# return: obj : Import option object. The basic import options for importing a static mesh
def buildStaticMeshImportOptions():
options = unreal.FbxImportUI()
# unreal.FbxImportUI
options.set_editor_property('import_mesh', True)
options.set_editor_property('import_textures', False)
options.set_editor_property('import_materials', False)
options.set_editor_property('import_as_skeletal', False) # Static Mesh
# unreal.FbxMeshImportData
options.static_mesh_import_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 0.0))
options.static_mesh_import_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, 0.0))
options.static_mesh_import_data.set_editor_property('import_uniform_scale', 1.0)
# unreal.FbxStaticMeshImportData
options.static_mesh_import_data.set_editor_property('combine_meshes', True)
options.static_mesh_import_data.set_editor_property('generate_lightmap_u_vs', True)
options.static_mesh_import_data.set_editor_property('auto_generate_collision', True)
return options
# Build skeletal mesh import options
# return: obj : Import option object. The basic import options for importing a skeletal mesh
def buildSkeletalMeshImportOptions():
options = unreal.FbxImportUI()
# unreal.FbxImportUI
options.set_editor_property('import_mesh', True)
options.set_editor_property('import_textures', False)
options.set_editor_property('import_materials', False)
options.set_editor_property('import_as_skeletal', True) # Skeletal Mesh
# unreal.FbxMeshImportData
options.skeletal_mesh_import_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 0.0))
options.skeletal_mesh_import_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, 0.0))
options.skeletal_mesh_import_data.set_editor_property('import_uniform_scale', 1.0)
# unreal.FbxSkeletalMeshImportData
options.skeletal_mesh_import_data.set_editor_property('import_morph_targets', True)
options.skeletal_mesh_import_data.set_editor_property('update_skeleton_reference_pose', False)
return options
# Build animation import options
# skeleton_path: str : Skeleton asset path of the skeleton that will be used to bind the animation
# return: obj : Import option object. The basic import options for importing an animation
def buildAnimationImportOptions(skeleton_path=''):
options = unreal.FbxImportUI()
# unreal.FbxImportUI
options.set_editor_property('import_animations', True)
options.skeleton = unreal.load_asset(skeleton_path)
# unreal.FbxMeshImportData
options.anim_sequence_import_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 0.0))
options.anim_sequence_import_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, 0.0))
options.anim_sequence_import_data.set_editor_property('import_uniform_scale', 1.0)
# unreal.FbxAnimSequenceImportData
options.anim_sequence_import_data.set_editor_property('animation_length', unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME)
options.anim_sequence_import_data.set_editor_property('remove_redundant_keys', False)
return options
# https://api.unrealengine.com/project/.html
# https://api.unrealengine.com/project/.html
# Execute the import tasks
# tasks: obj List : The import task objects. You can get them from buildImportTask()
# return: str List : The paths of successfully imported assets
def executeImportTasks(tasks):
    unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks(tasks)  # Import assets using the specified tasks
imported_asset_paths = []
for task in tasks:
for path in task.get_editor_property('imported_object_paths'):
imported_asset_paths.append(path)
return imported_asset_paths
def importAsset():
option = buildStaticMeshImportOptions()
asset_task = buildImportTask(asset_path, destination_path, option)
executeImportTasks([asset_task])
if __name__ == "__main__":
importAsset()
|
# -*- coding: utf-8 -*-
import logging
import unreal
import inspect
import types
import Utilities
from collections import Counter
class attr_detail(object):
def __init__(self, obj, name:str):
self.name = name
attr = None
self.bCallable = None
self.bCallable_builtin = None
try:
if hasattr(obj, name):
attr = getattr(obj, name)
self.bCallable = callable(attr)
self.bCallable_builtin = inspect.isbuiltin(attr)
except Exception as e:
unreal.log(str(e))
self.bProperty = not self.bCallable
self.result = None
self.param_str = None
self.bEditorProperty = None
self.return_type_str = None
self.doc_str = None
self.property_rw = None
if self.bCallable:
self.return_type_str = ""
if self.bCallable_builtin:
if hasattr(attr, '__doc__'):
docForDisplay, paramStr = _simplifyDoc(attr.__doc__)
# print(f"~~~~~ attr: {self.name} docForDisplay: {docForDisplay} paramStr: {paramStr}")
# print(attr.__doc__)
try:
sig = inspect.getargspec(getattr(obj, self.name))
# print("+++ ", sig)
args = sig.args
argCount = len(args)
if "self" in args:
argCount -= 1
except TypeError:
argCount = -1
if "-> " in docForDisplay:
self.return_type_str = docForDisplay[docForDisplay.find(')') + 1:]
else:
self.doc_str = docForDisplay[docForDisplay.find(')') + 1:]
if argCount == 0 or (argCount == -1 and paramStr == ''):
# Method with No params
if '-> None' not in docForDisplay or self.name in ["__reduce__", "_post_init"]:
try:
if name == "get_actor_time_dilation" and isinstance(obj, unreal.Object):
# call get_actor_time_dilation will crash engine if actor is get from CDO and has no world.
if obj.get_world():
# self.result = "{}".format(attr.__call__())
self.result = attr.__call__()
else:
self.result = "skip call, world == None."
else:
# self.result = "{}".format(attr.__call__())
self.result = attr.__call__()
except:
self.result = "skip call.."
else:
print(f"docForDisplay: {docForDisplay}, self.name: {self.name}")
self.result = "skip call."
else:
self.param_str = paramStr
self.result = ""
else:
logging.error("Can't find p")
elif self.bCallable_other:
if hasattr(attr, '__doc__'):
if isinstance(attr.__doc__, str):
docForDisplay, paramStr = _simplifyDoc(attr.__doc__)
if name in ["__str__", "__hash__", "__repr__", "__len__"]:
try:
self.result = "{}".format(attr.__call__())
except:
self.result = "skip call."
else:
# self.result = "{}".format(getattr(obj, name))
self.result = getattr(obj, name)
def post(self, obj):
if self.bOtherProperty and not self.result:
try:
self.result = getattr(obj, self.name)
except:
self.result = "skip call..."
def apply_editor_property(self, obj, type_, rws, descript):
self.bEditorProperty = True
self.property_rw = "[{}]".format(rws)
try:
            self.result = obj.get_editor_property(self.name)
except:
self.result = "Invalid"
def __str__(self):
s = f"Attr: {self.name} paramStr: {self.param_str} desc: {self.return_type_str} result: {self.result}"
if self.bProperty:
s += ", Property"
if self.bEditorProperty:
s += ", Eidtor Property"
if self.bOtherProperty:
s += ", Other Property "
if self.bCallable:
s += ", Callable"
if self.bCallable_builtin:
s += ", Callable_builtin"
if self.bCallable_other:
s += ", bCallable_other"
if self.bHasParamFunction:
s+= ", bHasParamFunction"
return s
def check(self):
counter = Counter([self.bOtherProperty, self.bEditorProperty, self.bCallable_other, self.bCallable_builtin])
# print("counter: {}".format(counter))
if counter[True] == 2:
unreal.log_error(f"{self.name}: {self.bEditorProperty}, {self.bOtherProperty} {self.bCallable_builtin} {self.bCallable_other}")
@property
def bOtherProperty(self):
if self.bProperty and not self.bEditorProperty:
return True
return False
@property
def bCallable_other(self):
if self.bCallable and not self.bCallable_builtin:
return True
return False
@property
def display_name(self, bRichText=True):
if self.bProperty:
return f"\t{self.name}"
else:
# callable
if self.param_str:
return f"\t{self.name}({self.param_str}) {self.return_type_str}"
else:
if self.bCallable_other:
return f"\t{self.name}" # __hash__, __class__, __eq__ 等
else:
return f"\t{self.name}() {self.return_type_str}"
@property
def display_result(self) -> str:
if self.bEditorProperty:
return "{} {}".format(self.result, self.property_rw)
else:
return "{}".format(self.result)
@property
def bHasParamFunction(self):
return self.param_str and len(self.param_str) != 0
def ll(obj):
if not obj:
return None
if inspect.ismodule(obj):
return None
result = []
for x in dir(obj):
attr = attr_detail(obj, x)
result.append(attr)
if hasattr(obj, '__doc__') and isinstance(obj, unreal.Object):
editorPropertiesInfos = _getEditorProperties(obj.__doc__, obj)
for name, type_, rws, descript in editorPropertiesInfos:
# print(f"~~ {name} {type} {rws}, {descript}")
index = -1
for i, v in enumerate(result):
if v.name == name:
index = i
break
if index != -1:
this_attr = result[index]
else:
this_attr = attr_detail(obj, name)
result.append(this_attr)
# unreal.log_warning(f"Can't find editor property: {name}")
this_attr.apply_editor_property(obj, type_, rws, descript)
for i, attr in enumerate(result):
attr.post(obj)
return result
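# Hedged usage sketch: `ll` can be pointed at any unreal object to dump its
# attributes, e.g. the current editor world (illustrative only).
# world = unreal.EditorLevelLibrary.get_editor_world()
# for attr in ll(world):
#     print(attr)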
def _simplifyDoc(content):
def next_balanced(content, s="(", e = ")" ):
s_pos = -1
e_pos = -1
balance = 0
for index, c in enumerate(content):
match = c == s or c == e
if not match:
continue
balance += 1 if c == s else -1
if c == s and balance == 1 and s_pos == -1:
s_pos = index
if c == e and balance == 0 and s_pos != -1 and e_pos == -1:
e_pos = index
return s_pos, e_pos
return -1, -1
# bracketS, bracketE = content.find('('), content.find(')')
if not content:
return "", ""
bracketS, bracketE = next_balanced(content, s='(', e = ')')
arrow = content.find('->')
funcDocPos = len(content)
endSign = ['--', '\n', '\r']
for s in endSign:
p = content.find(s)
if p != -1 and p < funcDocPos:
funcDocPos = p
funcDoc = content[:funcDocPos]
if bracketS != -1 and bracketE != -1:
param = content[bracketS + 1: bracketE].strip()
else:
param = ""
return funcDoc, param
def _getEditorProperties(content, obj):
# print("Content: {}".format(content))
lines = content.split('\r')
signFound = False
allInfoFound = False
result = []
for line in lines:
if not signFound and '**Editor Properties:**' in line:
signFound = True
if signFound:
#todo re
# nameS, nameE = line.find('``') + 2, line.find('`` ')
nameS, nameE = line.find('- ``') + 4, line.find('`` ')
if nameS == -1 or nameE == -1:
continue
typeS, typeE = line.find('(') + 1, line.find(')')
if typeS == -1 or typeE == -1:
continue
rwS, rwE = line.find('[') + 1, line.find(']')
if rwS == -1 or rwE == -1:
continue
name = line[nameS: nameE]
type_str = line[typeS: typeE]
rws = line[rwS: rwE]
descript = line[rwE + 2:]
allInfoFound = True
result.append((name, type_str, rws, descript))
# print(name, type, rws)
if signFound:
if not allInfoFound:
unreal.log_warning("not all info found {}".format(obj))
else:
unreal.log_warning("can't find editor properties in {}".format(obj))
return result
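# For reference, the docstring lines this parser expects look roughly like the
# following (format as generated in unreal's __doc__ strings; the property name
# and description here are illustrative):
#   **Editor Properties:** (see get_editor_property/set_editor_property)
#   - ``hidden`` (bool):  [Read-Write] Whether the actor is hidden...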
def log_classes(obj):
print(obj)
print("\ttype: {}".format(type(obj)))
print("\tget_class: {}".format(obj.get_class()))
if type(obj.get_class()) is unreal.BlueprintGeneratedClass:
generatedClass = obj.get_class()
else:
generatedClass = unreal.PythonBPLib.get_blueprint_generated_class(obj)
print("\tgeneratedClass: {}".format(generatedClass))
print("\tbp_class_hierarchy_package: {}".format(unreal.PythonBPLib.get_bp_class_hierarchy_package(generatedClass)))
def is_selected_asset_type(types):
selectedAssets = Utilities.Utils.get_selected_assets()
for asset in selectedAssets:
if type(asset) in types:
            return True
return False
|
import os
import hashlib
import requests
import unreal
def get_plugin_paths(plugin_name="MinesweeperMind"):
"""
Retrieve key directories for the plugin.
Returns a tuple: (plugin_root, scripts_dir, models_dir)
"""
plugin_root = os.path.join(unreal.Paths.project_plugins_dir(), plugin_name)
did_succeed, out_reason = unreal.Paths.validate_path(plugin_root)
if not did_succeed:
raise ValueError(f"Invalid plugin path: {out_reason}")
scripts_dir = os.path.join(plugin_root, "Content", "Scripts")
did_succeed, out_reason = unreal.Paths.validate_path(scripts_dir)
if not did_succeed:
raise ValueError(f"Invalid scripts path: {out_reason}")
models_dir = os.path.join(plugin_root, "Content", "LargeLanguageModels")
did_succeed, out_reason = unreal.Paths.validate_path(models_dir)
if not did_succeed:
raise ValueError(f"Invalid models path: {out_reason}")
return plugin_root, scripts_dir, models_dir
def download_model_if_missing(model_url, target_path, expected_sha256=None):
"""
Download a model from a secure URL if it doesn't already exist at target_path.
Args:
model_url (str): HTTPS URL for the model.
target_path (str): Local file path where the model should be saved.
expected_sha256 (str, optional): Expected SHA256 hash for file verification.
Returns:
bool: True if the file exists or was successfully downloaded (and verified), False otherwise.
"""
if os.path.exists(target_path):
unreal.log(f"Model file already exists at: {target_path}")
return True
unreal.log(f"Downloading model from {model_url} to {target_path}...")
try:
with requests.get(model_url, stream=True) as response:
response.raise_for_status()
with open(target_path, "wb") as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
unreal.log("Download complete.")
except Exception as e:
unreal.log_error(f"Error downloading model: {e}")
return False
# Verify file hash if an expected hash is provided.
if expected_sha256:
with open(target_path, "rb") as f:
computed_hash = hashlib.sha256(f.read()).hexdigest()
if computed_hash.lower() != expected_sha256.lower():
unreal.log_error(f"SHA256 mismatch! Expected {expected_sha256.lower()}, but got {computed_hash.lower()}.")
return False
unreal.log("SHA256 hash verified successfully.")
return True
def setup_global_llm_model():
"""
Ensures the model file exists in the plugin's models directory.
Returns the full path to the model file if successful, or None otherwise.
"""
_, _, models_dir = get_plugin_paths()
if not os.path.exists(models_dir):
os.makedirs(models_dir)
unreal.log(f"Created models directory at: {models_dir}")
MODEL_URL = "https://huggingface.co/project/-1.1B-Chat-v1.0-GGUF/project/-1.1b-chat-v1.0.Q4_K_M.gguf"
TARGET_FILENAME = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
target_path = os.path.join(models_dir, TARGET_FILENAME)
EXPECTED_SHA256 = "9fecc3b3cd76bba89d504f29b616eedf7da85b96540e490ca5824d3f7d2776a0"
if download_model_if_missing(MODEL_URL, target_path, expected_sha256=EXPECTED_SHA256):
unreal.log(f"Global LLM model is ready at: {target_path}")
return target_path
unreal.log_error("Failed to download or verify the global LLM model.")
return None
GLOBAL_MODEL_PATH = None
def initialize_global_model():
"""
Initializes the global model path if it hasn't been set.
"""
global GLOBAL_MODEL_PATH
if GLOBAL_MODEL_PATH is None:
GLOBAL_MODEL_PATH = setup_global_llm_model()
return GLOBAL_MODEL_PATH
def get_global_model_path():
"""
Getter for the global model file path.
"""
if GLOBAL_MODEL_PATH is None:
unreal.log_warning("Global model path has not been initialized yet. Call initialize_global_model() first.")
return GLOBAL_MODEL_PATH
# Ensure the global model is set up.
initialize_global_model()
model_file_path = get_global_model_path()
if not model_file_path:
unreal.log_error("Global model path is not available!")
else:
unreal.log("Using model file at: {}".format(model_file_path))
# --------------------------------------------------------------------------
# Set up the LLM using LlamaCpp
from langchain_community.llms import LlamaCpp
from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
GLOBAL_LLM = LlamaCpp(
model_path=model_file_path,
n_gpu_layers=1,
n_batch=512,
n_ctx=2048,
f16_kv=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
verbose=True,
)
# --------------------------------------------------------------------------
# Use an LLMChain to generate game dimensions based on a user's request.
from langchain.chains import LLMChain
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import ChatPromptTemplate
# Define the expected JSON output schema.
response_schemas = [
ResponseSchema(name="rows", description="Number of rows in the Minesweeper grid."),
ResponseSchema(name="columns", description="Number of columns in the Minesweeper grid."),
ResponseSchema(name="mine_count", description="Number of mines to place on the grid."),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
# Escape curly braces in format_instructions so they are treated literally.
escaped_format_instructions = format_instructions.replace("{", "{{").replace("}", "}}")
# Create a custom prompt that instructs the LLM to output strictly JSON with the required keys.
custom_prompt = ChatPromptTemplate.from_template(
f"""You are a game dimension generator for Minesweeper.
Based on the user's request, extract the grid dimensions and mine count.
Your output must be a valid JSON object with exactly three keys: "rows", "columns", and "mines" (not "mine_count").
Do not include any additional text.
{escaped_format_instructions}
User request: {{input}}
Answer:"""
)
# Create the LLMChain
chain = LLMChain(llm=GLOBAL_LLM, prompt=custom_prompt)
def get_game_dimensions(query: str) -> dict:
raw_output = chain.run({"input": query})
try:
parsed = output_parser.parse(raw_output)
# Normalize the output by renaming 'mine_count' to 'mines'
if "mine_count" in parsed:
parsed["mines"] = parsed.pop("mine_count")
return parsed
except Exception as e:
unreal.log_error(f"Error parsing output: {e}")
return {}
# Example usage:
if __name__ == "__main__":
user_query = "Create an expert level Minesweeper game"
dimensions = get_game_dimensions(user_query)
unreal.log(f"Extracted game dimensions: {dimensions}")
print(dimensions)
print()
user_query = "Create an medium level Minesweeper game"
dimensions = get_game_dimensions(user_query)
unreal.log(f"Extracted game dimensions: {dimensions}")
print(dimensions)
print()
user_query = "Create an easy level Minesweeper game"
dimensions = get_game_dimensions(user_query)
unreal.log(f"Extracted game dimensions: {dimensions}")
print(dimensions)
print()
user_query = "10 10 15"
dimensions = get_game_dimensions(user_query)
unreal.log(f"Extracted game dimensions: {dimensions}")
print(dimensions)
print()
user_query = "hey fam squad can you generate a 3x3 grid with 2 mines?"
dimensions = get_game_dimensions(user_query)
unreal.log(f"Extracted game dimensions: {dimensions}")
print(dimensions)
|
#!/project/ python
# -*- encoding: utf-8 -*-
import unreal
from MPath import MPath
e_util = unreal.EditorUtilityLibrary()
a_util = unreal.EditorAssetLibrary()
str_util = unreal.StringLibrary()
sys_util = unreal.SystemLibrary()
l_util = unreal.EditorLevelLibrary()
class UNode:
    '''
    Wrapper around an Actor object in the World Outliner; exposes a few
    attributes that can be modified through this object.
    '''
def __init__(self, UActor):
        # TODO: consider the different ways instances can be constructed
self.node = UActor
self.node_name = UActor.get_name()
self.node_type = UActor.get_class().get_fname()
self.node_path = UActor.get_path_name()
# self.node_asset =
self.vaild = False
self.hidden = False
self.hidden_game = False
# self.attribute_list = ['hidden']
pass
def hidden(self, game=False):
        '''
        Hide this object.
        :return:
        '''
if game:
if self.hidden_game:
self.node.set_actor_hidden_in_game(False)
self.hidden_game = False
else:
self.node.set_actor_hidden_in_game(True)
self.hidden_game = True
else:
if self.hidden:
self.node.hidden
pass
def delete(self):
        '''
        Delete this object.
        :return:
        '''
pass
def parent(self):
        '''
        Place this node under a parent node.
        :return:
        '''
pass
def __getattr__(self, item):
        '''
        Bridge attribute access from this wrapper to the wrapped actor
        (returns the actor's bound `get_<item>` method).
        :param item:
        :return:
        '''
return getattr(self.node, 'get_' + item)
def __setattr__(self, key, value):
        '''
        Bridge attribute assignment from this wrapper to the wrapped actor.
        Attributes set before the wrapped actor exists, or without a matching
        setter on the actor, are stored on the wrapper itself to avoid
        infinite recursion during __init__.
        :param key:
        :param value:
        :return:
        '''
        set_func = getattr(self.node, 'set_' + key, None) if 'node' in self.__dict__ else None
        if set_func:
            set_func(value)
        else:
            object.__setattr__(self, key, value)
def component(self):
        '''
        Get all components under the current node.
        :return:
        '''
pass
def child(self):
        '''
        Get the child nodes of the current node.
        :return:
        '''
return self.node.get_all_child_actors()
def distance(self, actor):
        '''
        Return the distance from the current actor to another actor.
        :return:
        '''
pass
return self.node.get_distance_to(actor)
class FolderNode:
def __init__(self,directory_path):
self.path = directory_path.replace('\\','/')
if not a_util.does_directory_exist(self.path):
unreal.log_error("Path is not exists.")
def children(self):
        '''
        Get the child files under the current folder.
        :return:
        '''
pass
@property
def stem(self):
pass
@property
def name(self):
pass
def makedir(self):
pass
def listdir(self):
pass
def is_folder(self):
pass
def is_file(self):
pass
class ANode:
    # Wrapper around an asset_data object from the Content Browser
def __init__(self, UAsset):
        # Can be constructed from an object instance, an AssetData, or an asset path string
self.vaild = False
if type(UAsset) == type(unreal.AssetData()):
unreal.log("current Uasset is asset data")
            # An AssetData instance was passed in
self.node_asset = UAsset
self.node = UAsset.get_asset()
self.node_path = UAsset.object_path
self.node_directory = sys_util.get_system_path(self.node)
self.node_type = self.node.asset_class
self.vaild = True
elif type(UAsset) == str:
unreal.log("current Uasset is node path")
            # If a path string is passed in, treat it as a folder first, then check whether it is an asset
if a_util.does_directory_exist(UAsset):
                # The directory exists, which indirectly confirms a folder path was passed in
self.node = FolderNode(UAsset)
self.node_path = sys_util.get_project_content_directory()+UAsset.replace('/Game/','')
self.node_type = 'folder'
self.node_asset = unreal.AssetData(self.node_path)
self.node_directory = UAsset
self.vaild = True
elif a_util.does_asset_exist(UAsset):
                # The string is an asset path
self.node = a_util.load_asset(UAsset)
self.node_type = self.node.get_full_name().split(' ')[0]
self.node_path = self.node.get_full_name().split(' ')[1]
self.node_asset = unreal.AssetData(self.node_path)
self.node_directory = sys_util.get_system_path(self.node)
self.vaild = True
else:
                show_message('current folder does not exist.')
elif UAsset:
            # Anything that is not one of the two cases above is treated as an object type; this can be improved later.
            # Converting the object into asset_data is currently done purely by splitting its full-name string.
unreal.log("current Uasset is asset node")
self.node_path = UAsset.get_full_name().split(' ')[1]
self.node_type = UAsset.get_full_name().split(' ')[0]
self.node = UAsset
self.node_asset = unreal.AssetData(self.node_path)
self.node_directory = sys_util.get_system_path(self.node)
self.vaild = True
else:
unreal.log_error(UAsset)
unreal.log_error('convert asset node failure.')
if self.vaild:
self.name = self.node.get_name()
if self.node_type == 'folder':
self.init_folder()
elif self.node_type == 'blueprint':
self.init_blueprint()
else:
self.init_asset()
def init_asset(self):
        '''
        Things to handle when the node is an asset.
        :return:
        '''
# unreal.log(self.node_type+' init exec.')
pass
def init_blueprint(self):
        '''
        Things to handle when the node is a blueprint.
        :return:
        '''
# unreal.log('blueprint init exec.')
pass
def init_folder(self):
        '''
        Things to handle when the node is a folder, e.g. getting the parent
        folder and the list of children.
        :return:
        '''
        # Collect the contents of this folder: sub-folders become folder-type ANodes, assets become asset-type ANodes
file_list = self.node.listdir()
self.child = {}
        # Sub-folders are converted to relative paths and wrapped as folder-type nodes
self.child.update({f.name:ANode(self.node_path+'/'+f.name) for f in file_list if f.is_folder()})
        # TODO: testing shows that list_assets cannot retrieve level objects here
for dp in a_util.list_assets(self.node_path, recursive=False):
nd = ANode(dp)
if nd.name in self.child:
                # Name collision (a folder and an object share the same name); store both under the name as a tuple
self.child[nd.name] = (self.child[nd.name],nd)
@property
def parent(self):
if self.node_path == '/Game':
return self
else:
return ANode(self.node_path.replace('/'+self.node_path.split('/')[-1],''))
def delete(self):
if self.vaild:
if self.node_type == "folder":
a_util.delete_directory(self.node)
else:
a_util.delete_asset(self.node)
def list_dir(self):
if self.vaild:
return a_util.list_assets(self.node_path) if self.node_type == "folder" else None
def load(self, location=unreal.Vector(), rotation=unreal.Rotator()):
if self.vaild:
            ## Add the current node to the scene
if self.node_type != 'folder':
if self.node_type == "blueprint":
l_util.spawn_actor_from_class(self.node, location, rotation)
else:
l_util.spawn_actor_from_object(self.node, location, rotation)
else:
show_message('current node is folder')
def copy(self, des_path):
        '''
        TODO: copy the current node to another location.
        :return:
        '''
pass
############# Test function ###############
if __name__ == '__main__':
    # Wrap the first selected level actor as a quick smoke test
    selected_actors = l_util.get_selected_level_actors()
    if selected_actors:
        uactor = UNode(selected_actors[0])
|
import unreal
import time
total_frames = 90
text_label = 'Working...'
with unreal.ScopedSlowTask(total_frames, text_label) as slow_task:
slow_task.make_dialog(True)
for i in range(total_frames):
if slow_task.should_cancel():
break
slow_task.enter_progress_frame(1)
time.sleep(1)
print(i)
|
import unreal
import os
from pathlib import Path
# import sys
# sys.path.append('C:/project/ Drone/project/')
# from rc_automation import upload_VR_to_gcs
def import_datatable():
fpath = "C:/project/.csv"
dpath = '/project/'
datatable_import_task = unreal.AssetImportTask()
datatable_import_task.filename = fpath
datatable_import_task.destination_path = dpath
datatable_import_task.replace_existing = True
datatable_import_task.automated = True
datatable_import_task.save = False
csv_factory = unreal.CSVImportFactory()
csv_factory.automated_import_settings.import_row_struct = unreal.load_object(None, '/project/.CameraLocations')
datatable_import_task.factory = csv_factory
asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
asset_tools.import_asset_tasks([datatable_import_task])
def import_images():
IMPORT_DIR = Path(r"/project/")
assert IMPORT_DIR.exists()
tasks = []
for images in IMPORT_DIR.glob("*.jpg"):
image_import_task = unreal.AssetImportTask()
image_import_task.filename = str(images)
image_import_task.destination_path = '/project/'
image_import_task.automated = True
tasks.append(image_import_task)
image_asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
image_asset_tools.import_asset_tasks(tasks)
def spawn_actor():
actor_obj = unreal.load_asset('/project/')
actor_location = unreal.Vector(0.0, 0.0, 100.0)
actor_rotation = unreal.Rotator(0.0, 0.0, -90.0)
unreal.EditorLevelLibrary.spawn_actor_from_object(actor_obj, actor_location, actor_rotation)
asset_path = '/project/'
task = unreal.AssetImportTask()
f = "C:/project/.fbx"
task.filename = f
task.destination_path = asset_path
task.destination_name = ''
task.replace_existing = True
task.automated = True
task.options = unreal.FbxImportUI()
task.options.import_as_skeletal = False
task.options.override_full_name = True
task.options.mesh_type_to_import = unreal.FBXImportType.FBXIT_STATIC_MESH
task.options.static_mesh_import_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 100.0))
task.options.static_mesh_import_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, -90.0))
task.options.static_mesh_import_data.set_editor_property('import_uniform_scale', 1.0)
task.options.static_mesh_import_data.set_editor_property('combine_meshes', True)
task.options.static_mesh_import_data.set_editor_property('generate_lightmap_u_vs', True)
task.options.static_mesh_import_data.set_editor_property('auto_generate_collision', True)
# task.options.static_mesh_import_data.set_editor_property('Coll', False)
# set_editor_property('collision_trace_flag', unreal.CollisionTraceFlag.CTF_USE_COMPLEX_AS_SIMPLE)
imported_asset = unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
imported_mesh = task.imported_object_paths
print(imported_mesh)
static_mesh = unreal.EditorAssetLibrary.load_asset('/project/.OfficeScptTest')
# Body = static_mesh.get_editor_property('body_setup')
# collision_trace_flag = unreal.CollisionTraceFlag.CTF_USE_COMPLEX_AS_SIMPLE
# Body.set_editor_property('collision_trace_flag', collision_trace_flag)
# static_mesh.set_editor_property('body_setup', Body)
for i, name in enumerate(imported_mesh):
name = unreal.EditorAssetLibrary.load_asset(name)
Body = name.get_editor_property('body_setup')
Body.set_editor_property('collision_trace_flag', unreal.CollisionTraceFlag.CTF_USE_COMPLEX_AS_SIMPLE)
name.set_editor_property('body_setup', Body)
# import_images()
# import_datatable()
spawn_actor()
unreal.EditorAssetLibrary.save_directory('/Game', only_if_is_dirty=True, recursive=True)
# unreal.EditorAssetLibrary.sync_browser_to_objects(['/project/'])
unreal.EditorAssetLibrary.checkout_asset('/project/.OfficeScptTest') # not working
|
"""
tools_capture_lookdev_renders.py
Run from UE Editor Python to capture a set of high-res renders from the LookDev level using HighResScreenshot.
"""
import os
from datetime import date
import unreal # noqa: F401
ROOT = unreal.SystemLibrary.get_project_directory()
LOG = os.path.normpath(os.path.join(ROOT, "../project/.md"))
# Default single-level capture; other multi-biome captures handled by tools_build_lookdev_levels
LEVEL_PATH = "/project/"
OUT_DIR = os.path.normpath(os.path.join(ROOT, "../project/"))
def append_log(msg: str):
try:
with open(LOG, "a", encoding="utf-8") as f:
f.write(f"\n[{date.today().isoformat()}] {msg}\n")
except Exception:
pass
def ensure_out_dir():
os.makedirs(OUT_DIR, exist_ok=True)
def capture(name: str, res_mult: int = 1):
ensure_out_dir()
unreal.EditorLevelLibrary.load_level(LEVEL_PATH)
filename = os.path.join(OUT_DIR, f"{name}.png")
unreal.AutomationLibrary.take_high_res_screenshot(1920 * res_mult, 1080 * res_mult, filename)
append_log(f"Captured lookdev render: {filename}")
def main():
capture("lookdev_overview", 1)
capture("lookdev_detail", 2)
unreal.SystemLibrary.print_string(None, "LookDev renders captured", text_color=unreal.LinearColor.GREEN)
if __name__ == "__main__":
main()
|
import unreal
asset = unreal.EditorAssetLibrary.load_asset("/project/")
# Get the EditorUtilitySubsystem
Subsys = unreal.get_editor_subsystem(unreal.EditorUtilitySubsystem)
# Spawn a new tab and register it with the EditorUtilitySubsystem
Subsys.spawn_and_register_tab(asset)
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
""" Example script for instantiating an asset, cooking it and baking all of
its outputs.
"""
_g_wrapper = None
def get_test_hda_path():
return '/project/.pig_head_subdivider_v01'
def get_test_hda():
return unreal.load_object(None, get_test_hda_path())
def on_post_instantiation(in_wrapper):
print('on_post_instantiation')
# in_wrapper.on_post_instantiation_state_exited_delegate_delegate.remove_callable(on_post_instantiation)
# Set parameter values for the next cook
# in_wrapper.set_bool_parameter_value('add_instances', True)
# in_wrapper.set_int_parameter_value('num_instances', 8)
in_wrapper.set_parameter_tuples({
'add_instances': unreal.HoudiniParameterTuple(bool_values=(True, )),
'num_instances': unreal.HoudiniParameterTuple(int32_values=(8, )),
})
# Print all parameter values
param_tuples = in_wrapper.get_parameter_tuples()
print('parameter tuples: {}'.format(len(param_tuples) if param_tuples else 0))
if param_tuples:
for param_tuple_name, param_tuple in param_tuples.items():
print('parameter tuple name: {}'.format(param_tuple_name))
print('\tbool_values: {}'.format(param_tuple.bool_values))
print('\tfloat_values: {}'.format(param_tuple.float_values))
print('\tint32_values: {}'.format(param_tuple.int32_values))
print('\tstring_values: {}'.format(param_tuple.string_values))
# Force a cook/recook
in_wrapper.recook()
def on_post_bake(in_wrapper, success):
in_wrapper.on_post_bake_delegate.remove_callable(on_post_bake)
print('bake complete ... {}'.format('success' if success else 'failed'))
# Delete the hda after the bake
in_wrapper.delete_instantiated_asset()
global _g_wrapper
_g_wrapper = None
def on_post_process(in_wrapper):
print('on_post_process')
# in_wrapper.on_post_processing_delegate.remove_callable(on_post_process)
# Print out all outputs generated by the HDA
num_outputs = in_wrapper.get_num_outputs()
print('num_outputs: {}'.format(num_outputs))
if num_outputs > 0:
for output_idx in range(num_outputs):
identifiers = in_wrapper.get_output_identifiers_at(output_idx)
print('\toutput index: {}'.format(output_idx))
print('\toutput type: {}'.format(in_wrapper.get_output_type_at(output_idx)))
print('\tnum_output_objects: {}'.format(len(identifiers)))
if identifiers:
for identifier in identifiers:
output_object = in_wrapper.get_output_object_at(output_idx, identifier)
output_component = in_wrapper.get_output_component_at(output_idx, identifier)
is_proxy = in_wrapper.is_output_current_proxy_at(output_idx, identifier)
print('\t\tidentifier: {}'.format(identifier))
print('\t\toutput_object: {}'.format(output_object.get_name() if output_object else 'None'))
print('\t\toutput_component: {}'.format(output_component.get_name() if output_component else 'None'))
print('\t\tis_proxy: {}'.format(is_proxy))
print('')
# bind to the post bake delegate
in_wrapper.on_post_bake_delegate.add_callable(on_post_bake)
# Bake all outputs to actors
print('baking all outputs to actors')
in_wrapper.bake_all_outputs_with_settings(
unreal.HoudiniEngineBakeOption.TO_ACTOR,
replace_previous_bake=False,
remove_temp_outputs_on_success=False)
def run():
# get the API singleton
api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
global _g_wrapper
# instantiate an asset, disabling auto-cook of the asset (so we have to
    # call wrapper.recook() to cook it)
_g_wrapper = api.instantiate_asset(get_test_hda(), unreal.Transform(), enable_auto_cook=False)
# Bind to the on post instantiation delegate (before the first cook)
_g_wrapper.on_post_instantiation_delegate.add_callable(on_post_instantiation)
# Bind to the on post processing delegate (after a cook and after all
# outputs have been generated in Unreal)
_g_wrapper.on_post_processing_delegate.add_callable(on_post_process)
if __name__ == '__main__':
run()
|
#
# Copyright(c) 2025 The SPEAR Development Team. Licensed under the MIT License <http://opensource.org/project/>.
# Copyright(c) 2022 Intel. Licensed under the MIT License <http://opensource.org/project/>.
#
import spear
import spear.utils.editor_utils
import unreal
unreal_editor_subsystem = unreal.get_editor_subsystem(unreal.UnrealEditorSubsystem)
editor_world_name = unreal_editor_subsystem.get_editor_world().get_name()
if __name__ == "__main__":
spear.log("Processing scene: ", editor_world_name)
actors = spear.utils.editor_utils.find_actors()
for actor in actors:
spear.log(f" {spear.utils.editor_utils.get_stable_name_for_actor(actor=actor)}")
components = spear.utils.editor_utils.get_components(actor=actor)
for component in components:
spear.log(f" {spear.utils.editor_utils.get_stable_name_for_component(component=component)}")
|
import unreal
import os
path_index: int = 0  # index into pathsArray: 0 = Screenshots, 1 = SaveGames, 2 = CustomizePresets
projectPath = unreal.Paths.project_dir()
screenshotsPath = projectPath + "/project/"
saveGamesPath = projectPath + "/project/"
customizePath = projectPath + "/CustomizePresets/"
pathsArray = [screenshotsPath, saveGamesPath, customizePath]
targetPath = pathsArray[path_index]
path = os.path.realpath(targetPath)
os.startfile(path)
print("Open ", projectPath)
|
#!/project/ python3
"""
Animation Assignment Script
Assigns flipbook animations to the WarriorCharacter blueprint slots
"""
import unreal
def assign_animations_to_blueprint():
"""Assign all flipbook animations to the blueprint animation slots"""
print("=== Assigning Animations to Blueprint Slots ===")
# Load the WarriorCharacter blueprint
blueprint_path = "/project/"
blueprint = unreal.EditorAssetLibrary.load_asset(blueprint_path)
if not blueprint:
print(f"ERROR: Could not load blueprint: {blueprint_path}")
return False
print(f"✓ Loaded blueprint: {blueprint_path}")
# Get the blueprint's default object (the CDO - Class Default Object)
try:
# Method 1: Try getting generated class directly
generated_class = blueprint.generated_class()
default_object = generated_class.get_default_object()
except:
try:
# Method 2: Use get_default_object from blueprint
default_object = blueprint.get_default_object()
except:
# Method 3: Use BlueprintEditorLibrary
try:
default_object = unreal.BlueprintEditorLibrary.get_blueprint_default_object(blueprint)
except:
print("ERROR: Could not get blueprint default object using any method")
return False
if not default_object:
print("ERROR: Default object is None")
return False
print(f"✓ Got default object: {type(default_object)}")
# Animation slot mappings (C++ property name -> Flipbook asset path)
animation_assignments = {
'IdleAnimation': '/project/',
'MoveAnimation': '/project/',
'AttackUpAnimation': '/project/',
'AttackDownAnimation': '/project/',
'AttackSideAnimation': '/project/',
'AttackUp2Animation': '/project/',
'AttackDown2Animation': '/project/',
'AttackSide2Animation': '/project/'
}
assigned_count = 0
# Assign each animation
for property_name, animation_path in animation_assignments.items():
print(f"\nAssigning {property_name} -> {animation_path}")
# Load the animation asset
animation_asset = unreal.EditorAssetLibrary.load_asset(animation_path)
if not animation_asset:
print(f"✗ Could not load animation: {animation_path}")
continue
print(f"✓ Loaded animation asset: {animation_asset.get_name()}")
# Try to set the property on the default object
try:
# Method 1: Direct property assignment
if hasattr(default_object, property_name.lower()):
setattr(default_object, property_name.lower(), animation_asset)
print(f"✓ Set {property_name} using direct assignment")
assigned_count += 1
else:
# Method 2: Using set_editor_property
try:
default_object.set_editor_property(property_name, animation_asset)
print(f"✓ Set {property_name} using set_editor_property")
assigned_count += 1
except Exception as e:
print(f"✗ Could not set {property_name}: {e}")
# Method 3: Try with Blueprint utilities
try:
unreal.BlueprintEditorLibrary.set_blueprint_variable_instance_editable(
blueprint, property_name, True
)
default_object.set_editor_property(property_name, animation_asset)
print(f"✓ Set {property_name} using BlueprintEditorLibrary")
assigned_count += 1
except Exception as e2:
print(f"✗ All methods failed for {property_name}: {e2}")
except Exception as e:
print(f"✗ Exception setting {property_name}: {e}")
# Compile and save the blueprint
if assigned_count > 0:
print(f"\nCompiling blueprint with {assigned_count} assignments...")
# Compile the blueprint
compile_result = unreal.BlueprintEditorLibrary.compile_blueprint(blueprint)
if compile_result:
print("✓ Blueprint compiled successfully")
else:
print("✗ Blueprint compilation failed")
# Mark blueprint as modified and save
unreal.EditorAssetLibrary.save_asset(blueprint_path)
print("✓ Blueprint saved")
return True
else:
print("✗ No animations were assigned")
return False
def verify_assignments():
"""Verify that animations were assigned correctly"""
print("\n=== Verifying Animation Assignments ===")
# Load the blueprint
blueprint_path = "/project/"
blueprint = unreal.EditorAssetLibrary.load_asset(blueprint_path)
if not blueprint:
print("ERROR: Could not load blueprint for verification")
return False
# Get default object for verification
try:
generated_class = blueprint.generated_class()
default_object = generated_class.get_default_object()
except:
try:
default_object = blueprint.get_default_object()
except:
try:
default_object = unreal.BlueprintEditorLibrary.get_blueprint_default_object(blueprint)
except:
print("ERROR: Could not get default object for verification")
return False
if not default_object:
print("ERROR: Default object is None for verification")
return False
# Check each animation slot
animation_properties = [
'IdleAnimation', 'MoveAnimation', 'AttackUpAnimation', 'AttackDownAnimation',
'AttackSideAnimation', 'AttackUp2Animation', 'AttackDown2Animation', 'AttackSide2Animation'
]
assigned_animations = 0
for prop_name in animation_properties:
try:
# Try to get the property value
animation_value = None
# Method 1: Direct attribute access
if hasattr(default_object, prop_name.lower()):
animation_value = getattr(default_object, prop_name.lower())
else:
# Method 2: Using get_editor_property
try:
animation_value = default_object.get_editor_property(prop_name)
except:
pass
if animation_value:
print(f"✓ {prop_name}: {animation_value.get_name()}")
assigned_animations += 1
else:
print(f"✗ {prop_name}: Not assigned")
except Exception as e:
print(f"✗ {prop_name}: Error checking - {e}")
print(f"\nVerification complete: {assigned_animations}/8 animations assigned")
return assigned_animations == 8
if __name__ == "__main__":
print("Starting animation assignment process...")
success = assign_animations_to_blueprint()
if success:
print("\n✓ Animation assignment completed!")
verify_assignments()
else:
print("\n✗ Animation assignment failed!")
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
""" Example for getting the API instance and starting/creating the Houdini
Engine Session.
"""
def run():
# Get the API singleton
api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
# Check if there is an existing valid session
if not api.is_session_valid():
# Create a new session
api.create_session()
if __name__ == '__main__':
run()
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" An example script that spawns some actors and then uses the API to
instantiate an HDA and set the actors as world inputs. The inputs are set
during post instantiation (before the first cook). After the first cook and
output creation (post processing) the input structure is fetched and logged.
"""
import unreal
_g_wrapper = None
def get_test_hda_path():
return '/project/.subnet_test_2_0'
def get_test_hda():
return unreal.load_object(None, get_test_hda_path())
def get_geo_asset_path():
return '/project/.Cube'
def get_geo_asset():
return unreal.load_object(None, get_geo_asset_path())
def get_cylinder_asset_path():
return '/project/.Cylinder'
def get_cylinder_asset():
return unreal.load_object(None, get_cylinder_asset_path())
def configure_inputs(in_wrapper):
print('configure_inputs')
# Unbind from the delegate
in_wrapper.on_post_instantiation_delegate.remove_callable(configure_inputs)
# Spawn some actors
actors = spawn_actors()
# Create a world input
world_input = in_wrapper.create_empty_input(unreal.HoudiniPublicAPIWorldInput)
# Set the input objects/assets for this input
world_input.set_input_objects(actors)
# copy the input data to the HDA as node input 0
in_wrapper.set_input_at_index(0, world_input)
# We can now discard the API input object
world_input = None
# Set the subnet_test HDA to output its first input
in_wrapper.set_int_parameter_value('enable_geo', 1)
def print_api_input(in_input):
print('\t\tInput type: {0}'.format(in_input.__class__))
print('\t\tbKeepWorldTransform: {0}'.format(in_input.keep_world_transform))
print('\t\tbImportAsReference: {0}'.format(in_input.import_as_reference))
if isinstance(in_input, unreal.HoudiniPublicAPIWorldInput):
print('\t\tbPackBeforeMerge: {0}'.format(in_input.pack_before_merge))
print('\t\tbExportLODs: {0}'.format(in_input.export_lo_ds))
print('\t\tbExportSockets: {0}'.format(in_input.export_sockets))
print('\t\tbExportColliders: {0}'.format(in_input.export_colliders))
print('\t\tbIsWorldInputBoundSelector: {0}'.format(in_input.is_world_input_bound_selector))
print('\t\tbWorldInputBoundSelectorAutoUpdate: {0}'.format(in_input.world_input_bound_selector_auto_update))
input_objects = in_input.get_input_objects()
if not input_objects:
print('\t\tEmpty input!')
else:
print('\t\tNumber of objects in input: {0}'.format(len(input_objects)))
for idx, input_object in enumerate(input_objects):
print('\t\t\tInput object #{0}: {1}'.format(idx, input_object))
def print_inputs(in_wrapper):
print('print_inputs')
# Unbind from the delegate
in_wrapper.on_post_processing_delegate.remove_callable(print_inputs)
# Fetch inputs, iterate over it and log
node_inputs = in_wrapper.get_inputs_at_indices()
parm_inputs = in_wrapper.get_input_parameters()
if not node_inputs:
print('No node inputs found!')
else:
print('Number of node inputs: {0}'.format(len(node_inputs)))
for input_index, input_wrapper in node_inputs.items():
print('\tInput index: {0}'.format(input_index))
print_api_input(input_wrapper)
if not parm_inputs:
print('No parameter inputs found!')
else:
print('Number of parameter inputs: {0}'.format(len(parm_inputs)))
for parm_name, input_wrapper in parm_inputs.items():
print('\tInput parameter name: {0}'.format(parm_name))
print_api_input(input_wrapper)
def spawn_actors():
actors = []
# Spawn a static mesh actor and assign a cylinder to its static mesh
# component
actor = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.StaticMeshActor, location=(0, 0, 0))
actor.static_mesh_component.set_static_mesh(get_cylinder_asset())
actor.set_actor_label('Cylinder')
actor.set_actor_transform(
unreal.Transform(
(-200, 0, 0),
(0, 0, 0),
(2, 2, 2),
),
sweep=False,
teleport=True
)
actors.append(actor)
# Spawn a static mesh actor and assign a cube to its static mesh
# component
actor = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.StaticMeshActor, location=(0, 0, 0))
actor.static_mesh_component.set_static_mesh(get_geo_asset())
actor.set_actor_label('Cube')
actor.set_actor_transform(
unreal.Transform(
(200, 0, 100),
(45, 0, 45),
(2, 2, 2),
),
sweep=False,
teleport=True
)
actors.append(actor)
return actors
def run():
# get the API singleton
api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
global _g_wrapper
# instantiate an asset with auto-cook enabled
_g_wrapper = api.instantiate_asset(get_test_hda(), unreal.Transform())
# Configure inputs on_post_instantiation, after instantiation, but before first cook
_g_wrapper.on_post_instantiation_delegate.add_callable(configure_inputs)
# Bind on_post_processing, after cook + output creation
_g_wrapper.on_post_processing_delegate.add_callable(print_inputs)
if __name__ == '__main__':
run()
|
import unreal
#import unreal module
from typing import List
#import typing module for 'clearer' annotation
##But, this is not Typescript though, :(
# Pick the parent material to compare against; the asset path below is a placeholder,
# replace it with the parent material you picked from the editor.
_seek_parent_material: unreal.Material = unreal.load_asset("/Game/Path/To/M_Parent")
_seek_parent_material_class: object = _seek_parent_material.get_class()
compare_seek_parent_material_class = unreal.Material.static_class()
compare_class: object = unreal.MaterialInstanceConstant.static_class()
asset_lib: unreal.EditorAssetLibrary = unreal.EditorAssetLibrary
util_lib: unreal.EditorUtilityLibrary = unreal.EditorUtilityLibrary
if _seek_parent_material_class == compare_seek_parent_material_class :
selected_list : List[object] = util_lib.get_selected_assets()
for each in selected_list :
classof = each.get_class()
if classof == compare_class :
if each.parent == _seek_parent_material :
need_to_print: str = each.get_name() + ' => ' + _seek_parent_material.get_name() + ' => OK '
unreal.log(need_to_print)
else :
need_to_print: str = each.get_name() + ' ≠> ' + _seek_parent_material.get_name()
unreal.log_warning(need_to_print)
else :
need_to_print: str = each.get_name() + ' <≠> Not MaterialInstanceConstant '
unreal.log_error(need_to_print)
else:
unreal.log_error('Parent is not unreal.Material()')
|
# Copyright Epic Games, Inc. All Rights Reserved.
import os
import unreal
import p4utils
import flow.cmd
import unrealcmd
from peafour import P4
from pathlib import Path
#-------------------------------------------------------------------------------
class _SyncBase(unrealcmd.Cmd):
def _write_p4sync_txt_header(self, out):
out.write("# lines starting with a '#' are comments\n")
out.write("# lines prefixed with '-' are excluded from your sync\n")
out.write("# -.../Android/...\n")
out.write("# -/project/...\n")
out.write("# -*.uasset\n")
#-------------------------------------------------------------------------------
class Sync(_SyncBase):
""" Syncs the current project and engine directories to the given changelist
(or latest if none is provided). If the '--all' option is specified then the
branch will be searched locally for existing .uproject files, scheduling each
one to be synced.
The sync can be filtered with a .p4sync.txt file. Lines prefixed with a '-'
(e.g. "-.../SootySweep/...") will be excluded from the sync and anything
already synced is de-synced. The command will read .p4sync.txt files from two
locations;
1. <branch_root>/.p4sync.txt
2. ~/.ushell/.p4sync.txt (where ~ is USERPROFILE on Windows)
Quick edit the branch's .p4sync.txt file with '.p4 sync edit'. An example
of a .p4sync.txt file is as follows;
# a comment
-.../Android/...
-/project/...
-*.uasset """
changelist = unrealcmd.Arg(-1, "The changelist to sync to ('now' if unspecified)")
noresolve = unrealcmd.Opt(False, "Do not run 'p4 resolve -am' after syncing")
dryrun = unrealcmd.Opt(False, "Only pretend to do the sync")
all = unrealcmd.Opt(False, "Sync all the branch's projects found locally")
addprojs = unrealcmd.Opt("", "Comma-separated names of additional projects to sync")
clobber = unrealcmd.Opt(False, "Clobber writable files when syncing")
nosummary = unrealcmd.Opt(False, "Do not print the result-time summary at the end")
echo = unrealcmd.Opt(False, "Echo depot paths as they are synced")
def complete_addprojs(self, prefix):
# Fake a _local_root
local_root = Path(os.getcwd())
for parent in local_root.parents:
if (parent / "GenerateProjectFiles.bat").is_file():
local_root = parent
break
self._local_root = local_root
        # Now we can fetch an approximate context and list its projects
ue_context = self._try_get_unreal_context()
if not ue_context:
return
branch = ue_context.get_branch()
if not branch:
return
return (x.stem for x in branch.read_projects())
@unrealcmd.Cmd.summarise
def _main_summarised(self):
return self._main_impl()
def main(self):
self._client_spec_restore = None
try:
if self.args.nosummary:
return self._main_impl()
else:
return self._main_summarised()
finally:
try:
if self._client_spec_restore:
print("Restoring client spec")
client_spec = self._client_spec_restore
client_spec["Options"] = client_spec["Options"].replace("clobber", "noclobber")
client_spec["Description"] = client_spec["Description"].replace("{ushell_clobber_patch}", "")
P4.client(i=True).run(input_data=client_spec)
except:
pass
def _setup(self):
self.print_info("Perforce environment")
# Check there's a valid Perforce environment.
username = p4utils.login()
if not p4utils.ensure_p4config():
self.print_warning("Unable to establish a P4CONFIG")
# Get some info about the Perforce environment and show it to the user
info = P4.info().run()
print("Client:", info.clientName)
print(" User:", info.userName)
print("Server:", getattr(info, "proxyAddress", info.serverAddress))
# Inform the user if Perforce didn't find the client.
if info.clientName == "*unknown*":
client_name = p4utils.get_p4_set("P4CLIENT")
_, p4config_name = p4utils.has_p4config(".")
raise EnvironmentError(f"Client '{client_name}' not found. Please check P4CLIENT setting in '{p4config_name}'")
# So that P4.where can succeed we sync one known file first. This also
        # ensures we can accommodate an unsynced stream switch.
for x in P4.sync(f"//{info.clientName}/GenerateProjectFiles.bat").read(on_error=False):
pass
# Find the root of the current branch
self.print_info("Discovering branch root")
branch_root = p4utils.get_branch_root(f"//{info.clientName}/project/")
print("Branch root:", branch_root)
# Map branch root somewhere on the local file system
local_root = P4.where(branch_root + "X").path
local_root = local_root[:-1] # to strip 'X'
print("Local root:", local_root)
self._info = info
self._branch_root = branch_root
self._local_root = local_root
def _try_get_unreal_context(self):
try:
ue_context = self.get_unreal_context()
branch = ue_context.get_branch()
# If branch doesn't match os.getcwd() then ditch it
if not (branch and branch.get_dir().samefile(self._local_root)):
raise EnvironmentError
except EnvironmentError:
try:
cwd = os.getcwd()
ue_context = unreal.Context(cwd)
except EnvironmentError:
ue_context = None
return ue_context
def _add_paths(self, syncer):
# Add the set of paths that all syncs should include
syncer.add_path(self._local_root + "*")
syncer.add_path(self._local_root + "Engine/...")
templates = self._local_root + "Templates/..."
if P4.files(templates, m=1).run(on_error=lambda x: None) is not None:
syncer.add_path(templates)
# If we've a valid context by this point we can try and use it.
glob_for_projects = False
self._current_cl = 0
if ue_context := self._try_get_unreal_context():
project = ue_context.get_project()
if self.args.all or not project:
if branch := ue_context.get_branch():
project_count = 0
self.print_info("Syncing all known projects")
for uproj_path in branch.read_projects():
print(uproj_path.stem)
syncer.add_path(str(uproj_path.parent) + "/...")
project_count += 1
# If we have somehow managed to not find any projects then
# fallback to globbing for them.
if not project_count:
print("No projects found via .uprojectdirs")
print("Falling back to a glob")
glob_for_projects = True
else:
# By default the active project is synced
self.print_info("Single project sync")
print("Project:", project.get_name())
syncer.add_path(str(project.get_dir()) + "/...")
# Extra projects
if self.args.addprojs and not self.args.all:
add_projects = self.args.addprojs.replace("/", ",")
add_projects = (x.strip() for x in add_projects.split(","))
add_projects = {x for x in add_projects if x}
known_projects = list(ue_context.get_branch().read_projects())
known_projects = {x.stem.lower():x for x in known_projects}
self.print_info("Additional projects to sync;")
for add_project in add_projects:
print(add_project, ": ", sep="", end="")
add_project = add_project.lower()
if add_project not in known_projects:
print("not found")
continue
add_project = known_projects[add_project]
add_project = add_project.parent
syncer.add_path(str(add_project) + "/...")
print(add_project)
engine_info = ue_context.get_engine().get_info()
self._current_cl = engine_info.get("Changelist", 0)
else:
glob_for_projects = True
if glob_for_projects:
# There does not appear to be a fully formed branch so we will infer
# `--all` here on behalf of the user.
self.print_info("Syncing all projects by **/.uproject")
for uproj_path in Path(self._local_root).glob("**/*.uproject"):
print(uproj_path.stem)
syncer.add_path(str(uproj_path.parent) + "/...")
def _main_impl(self):
self._setup()
# Determine the changelist to sync
sync_cl = self.args.changelist
if sync_cl < 0:
sync_cl = int(P4.changes(self._branch_root + "...", m=1).change)
# Remove "noclobber" from the user's client spec
client = P4.client(o=True).run()
client_spec = client.as_dict()
client_spec.setdefault("Description", "")
if self.args.clobber:
self.print_info("Checking for 'noclobber'")
if "noclobber" in client_spec["Options"]:
client_spec["Options"] = client_spec["Options"].replace("noclobber", "clobber")
client_spec["Description"] += "{ushell_clobber_patch}"
self._client_spec_restore = client_spec.copy()
if not self.args.dryrun or True:
print(f"Patching {client.Client} with 'clobber'")
P4.client(i=True).run(input_data=client_spec)
else:
print("Clobbering is already active")
if not self._client_spec_restore:
if "{ushell_clobber_patch}" in client_spec["Description"]:
if "noclobber" not in client_spec["Options"]:
self._client_spec_restore = client_spec.copy()
# Add the paths we always want to sync
syncer = p4utils.Syncer()
self._add_paths(syncer)
# Load and parse the .p4sync.txt file
self._apply_p4sync_txt(syncer)
version_cl = 0
build_ver_path = self._local_root + "Engine/project/.version"
try:
# Special case to force sync Build.version. It can get easily modified
# without Perforce's knowledge, complicating the sync.
if not self.args.dryrun:
P4.sync(build_ver_path + "@" + str(sync_cl), qf=True).run(on_error=False)
# GO!
self.print_info("Scheduling sync")
print("Changelist:", sync_cl, f"(was {self._current_cl})")
print("Requesting... ", end="")
syncer.schedule(sync_cl)
self.print_info("Syncing")
ok = syncer.sync(dryrun=self.args.dryrun, echo=self.args.echo)
if self.args.dryrun or not ok:
return ok
# Sync succeeded, update cl for build.version even if something goes wrong with resolving
version_cl = sync_cl
# Auto-resolve on behalf of the user.
if not self.args.noresolve:
conflicts = set()
self.print_info("Resolving")
for item in P4.resolve(am=True).read(on_error=False):
path = getattr(item, "fromFile", None)
if not path:
continue
path = path[len(self._branch_root):]
if getattr(item, "how", None):
                        conflicts.discard(path)
print(path)
else:
conflicts.add(path)
for conflict in conflicts:
print(flow.cmd.text.light_red(conflict))
except KeyboardInterrupt:
print()
if not self.args.dryrun:
self.print_warning(f"Sync interrupted! Writing build.version to CL {version_cl}")
return False
finally:
if not self.args.dryrun:
# Record the synced changelist in Build.version
with open(build_ver_path, "r") as x:
lines = list(x.readlines())
import stat
build_ver_mode = os.stat(build_ver_path).st_mode
os.chmod(build_ver_path, build_ver_mode|stat.S_IWRITE)
with open(build_ver_path, "w") as x:
for line in lines:
if r'"Changelist"' in line:
line = line.split(":", 2)
line = line[0] + f": {version_cl},\n"
elif r'"BranchName"' in line:
line = "\t\"BranchName\": \"X\"\n"
line = line.replace("X", self._branch_root[:-1].replace("/", "+"))
x.write(line)
def _apply_p4sync_txt(self, syncer):
def impl(path):
print("Source:", os.path.normpath(path), end="")
try:
sync_config = open(path, "rt")
print()
except:
print(" ... not found")
return
def read_exclusions():
for line in map(str.strip, sync_config):
if line.startswith("-"): yield line[1:]
elif line.startswith("$-"): yield line[2:]
for i, line in enumerate(read_exclusions()):
view = None
if line.startswith("*."): view = ".../" + line
elif line.startswith("/"): view = line[1:]
elif line.startswith("..."): view = line
print(" %2d" % i, "exclude", end=" ")
if view and (view.count("/") or "/*." in view or view.startswith("*.")):
view = self._branch_root + view
syncer.add_exclude(view)
print(view)
else:
view = view or line
print(flow.cmd.text.light_yellow(view + " (ill-formed)"))
sync_config.close()
self.print_info("Applying .p4sync.txt")
for dir in (self.get_home_dir(), self._local_root):
impl(dir + ".p4sync.txt")
#-------------------------------------------------------------------------------
class Edit(_SyncBase):
""" Opens .p4sync.txt in an editor. The editor is selected from environment
variables P4EDITOR, GIT_EDITOR, and the system default editor. """
def main(self):
username = p4utils.login()
cwd = os.getcwd()
client = p4utils.get_client_from_dir(cwd, username)
if not client:
raise EnvironmentError(f"Unable to establish the clientspec from '{cwd}'")
_, root_dir = client
path = Path(root_dir) / ".p4sync.txt"
if not path.is_file():
with path.open("wt") as out:
self._write_p4sync_txt_header(out)
print("Editing", path)
self.edit_file(path)
|
# -*- coding: utf-8 -*-
"""
A drop-down selector widget that supports typed input with completion hints.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
__author__ = "timmyliang"
__email__ = "[email protected]"
__date__ = "2020-07-18 21:09:23"
from ue_util import toast
from Qt import QtCore, QtWidgets, QtGui
from Qt.QtCompat import loadUi
import unreal
import sys
import os
MODULE = os.path.dirname(__file__)
if MODULE not in sys.path:
    sys.path.insert(0, MODULE)
class USelector(QtWidgets.QWidget):
_class_filter = []
def __init__(self, parent=None, class_filter=None):
super(USelector, self).__init__(parent)
self.class_filter = class_filter
DIR = os.path.dirname(__file__)
ui_path = os.path.join(DIR, "ue_selector.ui")
loadUi(ui_path, self)
        self.initialize()
    def initialize(self):
        # NOTE set up the button icons
style = QtWidgets.QApplication.style()
icon = style.standardIcon(QtWidgets.QStyle.SP_ArrowBack)
self.UGet.setIcon(icon)
icon = style.standardIcon(QtWidgets.QStyle.SP_FileDialogContentsView)
self.UFind.setIcon(icon)
icon = style.standardIcon(QtWidgets.QStyle.SP_BrowserStop)
self.UClear.setIcon(icon)
self.USelector.popup.connect(self.list_asset)
self.UGet.clicked.connect(self.get_asset)
self.UFind.clicked.connect(self.sync_asset)
self.UClear.clicked.connect(
lambda: self.USelector.setCurrentIndex(self.USelector.count() - 1)
)
        # TODO thumbnail cannot be fetched yet; hide the widget for now
self.UThumbnail.hide()
@property
def class_filter(self):
return self._class_filter
@class_filter.setter
def class_filter(self, filter):
self._class_filter = (
filter if isinstance(filter, list) else [filter] if filter else []
)
if self._class_filter:
self.list_asset()
def filter_append(self, _filter):
if not _filter:
return
elif isinstance(_filter, list):
self._class_filter.extend(_filter)
elif isinstance(_filter, dict):
self._class_filter.extend(_filter.keys())
else:
self._class_filter.append(_filter)
self.list_asset()
def list_asset(self):
reg = unreal.AssetRegistryHelpers.get_asset_registry()
n_list = [c.__name__ for c in self.class_filter]
ar_filter = unreal.ARFilter(class_names=n_list, recursive_classes=True)
assets = reg.get_assets(ar_filter)
assets_path = [str(a.object_path) for a in assets]
self.USelector.clear()
self.USelector.addItems(assets_path)
self.USelector.addItem("None")
return assets_path
def get_asset(self):
selected_asset = [
a
for a in unreal.EditorUtilityLibrary.get_selected_assets()
for cls_type in self.class_filter
if isinstance(a, cls_type)
]
if not selected_asset:
toast(
u"请选择下列类型\n %s" % ("\n".join([c.__name__ for c in self.class_filter]))
)
return
selected_asset = selected_asset[0]
count = self.USelector.count()
for i in range(count):
text = self.USelector.itemText(i)
if text == selected_asset.get_path_name():
self.USelector.setCurrentIndex(i)
break
def sync_asset(self):
path = self.USelector.currentText()
unreal.EditorAssetLibrary.sync_browser_to_objects([path])
def get_path(self):
text = self.USelector.currentText()
return None if text == "None" else text
def main():
selector = USelector(class_filter=[unreal.Material])
selector.show()
if __name__ == "__main__":
main()
|
import unreal
import os
import json
# ======= CONFIGURATION =======
JSON_FOLDER = r"/project/"
MATERIAL_SEARCH_FOLDER = '/project/' # Folder to search for materials
DEFAULT_MATERIAL_PATH = '/project/' # Fallback material
# ==============================
def load_material_by_name(name, slot_index=0):
assets = unreal.EditorAssetLibrary.list_assets(MATERIAL_SEARCH_FOLDER, recursive=True, include_folder=False)
for asset_path in assets:
if asset_path.endswith(name) or asset_path.endswith(name + '.' + name):
asset = unreal.EditorAssetLibrary.load_asset(asset_path)
if isinstance(asset, unreal.MaterialInterface):
return asset
# Fallback
default_mat = unreal.EditorAssetLibrary.load_asset(DEFAULT_MATERIAL_PATH)
if default_mat and isinstance(default_mat, unreal.MaterialInterface):
unreal.log_warning(f"Material '{name}' not found for slot {slot_index}, using fallback.")
return default_mat
unreal.log_error(f"Material '{name}' not found and fallback material is invalid.")
return None
def find_all_static_meshes():
meshes = []
assets = unreal.EditorAssetLibrary.list_assets('/Game', recursive=True, include_folder=False)
for asset_path in assets:
asset = unreal.EditorAssetLibrary.load_asset(asset_path)
if isinstance(asset, unreal.StaticMesh):
meshes.append(asset)
return meshes
def load_json_for_mesh(mesh_name):
json_path = os.path.join(JSON_FOLDER, mesh_name + ".json")
if os.path.exists(json_path):
with open(json_path, "r") as f:
data = json.load(f)
return data
return None
def get_material_names_from_json(json_data):
material_names = []
if isinstance(json_data, list):
for entry in json_data:
if isinstance(entry, dict) and entry.get("Type") == "StaticMesh":
props = entry.get("Properties")
if isinstance(props, dict):
mats = props.get("StaticMaterials", [])
for mat in mats:
if isinstance(mat, dict):
mat_interface = mat.get("MaterialInterface")
if isinstance(mat_interface, dict):
object_name = mat_interface.get("ObjectName", "")
if isinstance(object_name, str) and (
object_name.startswith("Material'") or object_name.startswith("MaterialInstanceConstant'")
):
material_name = object_name.split("'")[1].split(".")[0].split("/")[-1]
material_names.append(material_name)
return material_names
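# For reference, a sketch of the per-mesh JSON shape that get_material_names_from_json
# expects (inferred from the lookups above; the actual export format may differ):
# [
#   {
#     "Type": "StaticMesh",
#     "Properties": {
#       "StaticMaterials": [
#         {"MaterialInterface": {"ObjectName": "MaterialInstanceConstant'/Game/Path/MI_Example.MI_Example'"}}
#       ]
#     }
#   }
# ]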
def main():
meshes = find_all_static_meshes()
total_meshes = len(meshes)
with unreal.ScopedSlowTask(total_meshes, 'Assigning materials to meshes...') as task:
task.make_dialog(True)
for i, mesh in enumerate(meshes):
if task.should_cancel():
break
mesh_name = os.path.basename(mesh.get_name())
remaining = total_meshes - i
task.enter_progress_frame(1, f"[{remaining} remaining] Processing: {mesh_name}")
json_data = load_json_for_mesh(mesh_name)
if not json_data:
unreal.log_warning(f"Skipping {mesh_name} (no JSON found)")
continue
material_names = get_material_names_from_json(json_data)
if not material_names:
unreal.log_warning(f"Skipping {mesh_name} (no materials found in JSON)")
continue
for idx, material_name in enumerate(material_names):
material_asset = load_material_by_name(material_name, idx)
if material_asset:
mesh.set_material(idx, material_asset)
unreal.EditorAssetLibrary.save_asset(mesh.get_path_name())
unreal.log(f"Assigned material '{material_name}' to slot {idx} on '{mesh_name}'")
unreal.log("Material assignment finished.")
main()
|
import posixpath
import unreal
from unreal import EditorAssetLibrary as asset_lib
from unreal import PyToolkitBPLibrary as py_lib
from unreal import EditorLoadingAndSavingUtils as ls_utils
asset_tool = unreal.AssetToolsHelpers.get_asset_tools()
def create_asset(asset_path="", unique_name=True, asset_class=None, asset_factory=None):
if unique_name:
asset_path, _ = asset_tool.create_unique_asset_name(asset_path, "")
if not asset_lib.does_asset_exist(asset_path=asset_path):
path, name = posixpath.split(asset_path)
return asset_tool.create_asset(
asset_name=name,
package_path=path,
asset_class=asset_class,
factory=asset_factory,
)
return unreal.load_asset(asset_path)
# path = '/project/.NewEditorUtilityWidgetBlueprint1'
# editor_widget_bp = unreal.load_object(None,path)
# canvas = py_lib.add_root_widget(tree)
@unreal.uclass()
class CustomEditorWidget(unreal.EditorUtilityWidget):
pass
factory = unreal.EditorUtilityWidgetBlueprintFactory()
factory.set_editor_property("parent_class", CustomEditorWidget)
editor_widget_bp = create_asset(
'/project/',
False,
unreal.EditorUtilityWidgetBlueprint,
factory,
)
asset_lib.save_asset(editor_widget_bp.get_path_name())
ls_utils.reload_packages([editor_widget_bp.get_outer()])
# asset_lib.consolidate_assets(editor_widget_bp,[editor_widget_bp])
canvas = unreal.load_object(
None, "%s_C:WidgetTree.CanvasPanel_0" % editor_widget_bp.get_path_name()
)
print("canvas",canvas)
|
import unreal
import os
import scripts.utils.editorFuncs as editorFuncs
from scripts.config.params import Config
import scripts.state.stateManagerScript as stateManagerScript
import scripts.utils.popUp as popUp
from scripts.utils.logger import RecordingLog, guess_gloss_from_filename
_log = RecordingLog()
params = Config()
class LiveLinkFaceCSVWriterComponent:
"""
Custom component to write Live Link Face data to CSV files.
"""
def __init__(self, statemanager : stateManagerScript.StateManager, subj_name: str = "iPhone"):
super().__init__()
# Find the Actor in your level that has the CSV‐writer component
my_actor = editorFuncs.get_actor_by_name(params.actor_name)
if not my_actor:
unreal.log_error("[CSVWriter] Actor not found!")
raise RuntimeError
self.CSVWriterComp = my_actor.get_component_by_class(unreal.LiveLinkFaceCSVWriterComponent)
self.CSVWriterComp.set_subject_name(unreal.Name(subj_name))
self.subj_name = subj_name
self.set_save_folder(statemanager.folder)
self.CSVWriterComp.set_filename("MyCaptureData")
self.last_file_path = None
def start_recording(self):
success = self.CSVWriterComp.start_recording()
if success:
unreal.log("[CSVWriter] Recording started")
else:
unreal.log_error("[CSVWriter] Failed to start recording")
def stop_recording(self):
self.CSVWriterComp.stop_recording()
def export_file(self):
success = self.CSVWriterComp.export_file()
if success:
unreal.log("[CSVWriter] CSV file exported successfully")
csv_path = self.last_file_path
gloss = guess_gloss_from_filename(csv_path)
_log.add_asset(gloss, "blendshape_csv", csv_path, machine="UE", status="ready")
else:
unreal.log_error("[CSVWriter] Failed to export CSV file")
return success
def set_save_folder(self, folder: str):
# Normalize and ensure the directory exists before setting it
folder = os.path.normpath(folder or "")
try:
os.makedirs(folder, exist_ok=True)
except Exception as e:
unreal.log_error(f"[CSVWriter] Could not create folder '{folder}': {e}")
self.CSVWriterComp.set_save_folder(folder)
unreal.log(f"[CSVWriter] Save folder set to: {folder}")
return folder
def set_filename(self, filename: str):
self.CSVWriterComp.set_filename(filename)
unreal.log(f"[CSVWriter] Filename set to: {filename}")
folder = os.path.normpath(self.CSVWriterComp.get_save_folder())
self.last_file_path = os.path.join(folder, f"{filename}.csv")
return filename
def check_last_file(self):
"""
        Validate the most recently exported CSV file by checking that it contains a minimum number of rows.
"""
if not self.last_file_path:
popUp.show_popup_message("[CSVWriter] Error", "No last_file_path set in CSVWriter.")
return None
return self.check_file_minimum_rows(self.last_file_path)
def check_file_minimum_rows(self, file_path, min_rows: int = 50):
"""
        Check if the CSV file has at least `min_rows` rows.
"""
if not file_path:
unreal.log_error("[CSVWriter] No file path set. Please export a file first.")
return False
file_path = os.path.normpath(file_path)
try:
with open(file_path, 'r') as f:
lines = f.readlines()
if len(lines) < min_rows:
popUp.show_popup_message("[CSVWriter] Warning", f"File '{file_path}' has only {len(lines)} rows. Check the iPhone.")
return False
else:
unreal.log(f"[CSVWriter] File '{file_path}' has sufficient rows: {len(lines)}")
return True
except Exception as e:
unreal.log_error(f"[CSVWriter] Error checking file: {e}")
return False
    # Doesn't work currently, needs to be fixed
# def check_cur_subject_available(self):
# """
# Check if the subject is available in the Live Link system.
# """
# if self.CSVWriterComp.is_subject_available():
# unreal.log(f"[CSVWriter] Subject '{self.subj_name}' is available.")
# return True
# popUp.show_popup_message("[CSVWriter] Error", f"Subject '{self.subj_name}' is not available. Make sure the iPhone is connected and the Live Link Face app is running. Restart UE.")
# return False
|
import logging
import unreal
class UnrealLogHandler(logging.Handler):
"""
A logging.Handler that sends log messages into the Unreal Output Log.
Depending on record.levelno, it will call unreal.log, unreal.log_warning, or unreal.log_error.
"""
def __init__(self):
super().__init__()
def emit(self, record):
try:
msg = self.format(record)
level = record.levelno
# Map Python logging levels to Unreal log calls
if level >= logging.ERROR:
unreal.log_error(msg)
elif level >= logging.WARNING:
unreal.log_warning(msg)
else:
unreal.log(msg)
except Exception as e:
# If something goes wrong in logging, fallback to printing
print(f"[UnrealLogHandler] failed to emit record: {e}")
|
import unreal
import json
def generate_scene_layout(demand):
path = "./format_json.json"
from openai import OpenAI
client = OpenAI()
gpt_model = "gpt-3.5-turbo"
completion = client.chat.completions.create(model=gpt_model,
messages=[{"role": "system", "content": '''\
假设你是一名使用UE5引擎进行关卡开发的游戏内环境设计师,\
在接下来的场景中,会给定一个场景主题和场景大小,\
要求列出环境内存在哪些物品,\
物品摆放的坐标和物品根据其使用环境的缩放系数以及根据其形态和功能决定的物体朝向,可以存在多个同名物体,但是它们的需要独立列出坐标\
输出到表格中,并整理成JSON格式。
对于每一个物品,它应当独立地列出name:str、position:UE坐标系下的空间坐标,1*3数组、scale:UE坐标系下的空间向量,1*3数组,分别对应x、y、z方向上的缩放、rotation:UE坐标下的向量,1*3数组,代表物品朝向的法向量方向 四项,形如:
items:[
{
"name": "Counter1",
"position": [2, 18, 0],
"scale": [1, 1, 1],
"rotation": [0, 0, 0]
},
{
"name": "Counter2",
"position": [2, 18, 0],
"scale": [1, 1, 1],
"rotation": [0, 0, 0]
},
]
\
'''},
{"role": "user", "content": demand}]
)
    res = completion.choices[0].message.content
    print(res)
    with open(path, 'w') as write_f:
        write_f.write(res)
    return json.loads(res)
def get_all_meshes_from_scene_design(layout):
layout = layout["items"]
file_list = []
for i in layout:
# print(i)
        name, pos, scale, rot = i["name"], i["position"], i["scale"], i["rotation"]
# print(f"{name}:{pos},{scale},{rot}")
file_list.append([name,pos,scale,rot])
return file_list
def load_asset(asset_path):
"""
Load an asset from the given path.
"""
asset = unreal.EditorAssetLibrary.load_asset(asset_path)
if not asset:
unreal.log_error(f"Failed to load asset from path: {asset_path}")
return asset
def build_transformation(loc,rot,scale):
pos_x, pos_y, pos_z = loc
rot_x, rot_y, rot_z = rot
sca_x, sca_y, sca_z = scale
actor_location = unreal.Vector(pos_x * 50, pos_y * 50, pos_z * 50)
actor_rotation = unreal.Rotator(rot_x, rot_y, rot_z)
actor_scale = unreal.Vector(sca_x, sca_y, sca_z)
return actor_location, actor_rotation, actor_scale
def spawn_actor_from_asset(asset,loc,rot,scale):
"""
Spawn an actor from the given asset.
"""
if not asset:
unreal.log_error("Cannot spawn actor because the asset is invalid.")
return None
editor_subsystem = unreal.get_editor_subsystem(unreal.UnrealEditorSubsystem)
world = editor_subsystem.get_editor_world()
if not world:
unreal.log_error("Failed to get the editor world.")
return None
actor = unreal.EditorLevelLibrary.spawn_actor_from_object(asset,loc,rot)
    if not actor:
        unreal.log_error("Failed to spawn actor from asset.")
        return None
    actor.set_actor_scale3d(scale)
return actor
def search_or_generate(name):
name = name.lower()
if name.startswith('chair'):
f_path = "/project/.SM_Chair"
elif name.startswith("table"):
f_path = '/project/.SM_TableRound'
elif name.startswith('sofa'):
f_path = '/project/.SM_Couch'
elif name.startswith('bar'):
f_path = '/project/.SM_Classic_table'
elif name.startswith('chandelier'):
f_path = '/project/'
elif name.startswith('paint'):
f_path = '/project/.SM_Picture'
elif name.startswith('decorative painting'):
f_path = '/project/'
    elif name.startswith('customer'):
        f_path = None  # no static mesh available for characters; the caller skips these
else:
f_path = "/project/"
return f_path
if __name__ == "__main__":
    # User requirement input
    user_demand = """Design a 20m*20m cafe scene. It needs four tables (table), two chairs (chair) for each table, two sofas (sofa) and one bar counter (bar), plus some landscape paintings (paint).
    """
    # Have GPT generate the scene design
    layout = generate_scene_layout(user_demand)
    # Get all mesh descriptions
    meshes = get_all_meshes_from_scene_design(layout)
for mesh in meshes:
        # Find a mesh matching the description (original intent: if none exists, generate one with a T2M model)
        mesh_path = search_or_generate(name = mesh[0])
        # Skip entries that have no mesh path (e.g. 'customer')
        if not mesh_path:
            continue
        # Load the asset into UE
        asset = load_asset(mesh_path)
actor_location, actor_rotation,actor_scale = build_transformation(mesh[1],mesh[3],mesh[2])
spawned_actor = spawn_actor_from_asset(asset, actor_location, actor_rotation, actor_scale)
if spawned_actor:
unreal.log("Successfully spawned actor from asset.")
else:
unreal.log_error("Failed to spawn actor from asset.")
|
# /project/
# @CBgameDev Optimisation Script - Log No Collision Static Meshes
# /project/
import unreal
import os
EditAssetLib = unreal.EditorAssetLibrary()
StatMeshLib = unreal.EditorStaticMeshLibrary()
workingPath = "/Game/" # Using the root directory
notepadFilePath = os.path.join(os.path.dirname(__file__), "PythonOptimiseLog.txt")
allAssets = EditAssetLib.list_assets(workingPath, True, False)
selectedAssetsPath = workingPath
LogStringsArray = []
numOfOptimisations = 0
with unreal.ScopedSlowTask(len(allAssets), selectedAssetsPath) as ST:
ST.make_dialog(True)
for asset in allAssets:
_assetData = EditAssetLib.find_asset_data(asset)
_assetName = _assetData.get_asset().get_name()
_assetPathName = _assetData.get_asset().get_path_name()
_assetClassName = _assetData.get_asset().get_class().get_name()
if _assetClassName == "StaticMesh":
_staticMeshAsset = unreal.StaticMesh.cast(EditAssetLib.load_asset(asset))
_collisionComplexity = StatMeshLib.get_collision_complexity(_staticMeshAsset)
_TotalNumCollisionElements = StatMeshLib.get_simple_collision_count(_staticMeshAsset) + StatMeshLib.get_convex_collision_count(_staticMeshAsset)
if _TotalNumCollisionElements <= 0:
LogStringsArray.append(" %s ------------> At Path: %s \n" % (_assetName, _assetPathName))
# unreal.log("Asset Name: %s Path: %s \n" % (_assetName, _assetPathName))
numOfOptimisations += 1
if ST.should_cancel():
break
ST.enter_progress_frame(1, asset)
# Write results into a log file
# /project/
TitleOfOptimisation = "Log No Collision Static Meshes"
DescOfOptimisation = "Searches the entire project for static mesh assets that do not have any collision setup"
SummaryMessageIntro = "-- Static Mesh Assets Which Do Not Have Collision --"
if unreal.Paths.file_exists(notepadFilePath): # Check if txt file already exists
os.remove(notepadFilePath) # if does remove it
# Create new txt file and run intro text
file = open(notepadFilePath, "a+")  # we should only do this if we have a count?
file.write("OPTIMISING SCRIPT by @CBgameDev \n")
file.write("==================================================================================================== \n")
file.write(" SCRIPT NAME: %s \n" % TitleOfOptimisation)
file.write(" DESCRIPTION: %s \n" % DescOfOptimisation)
file.write("==================================================================================================== \n \n")
if numOfOptimisations <= 0:
file.write(" -- NONE FOUND -- \n \n")
else:
for i in range(len(LogStringsArray)):
file.write(LogStringsArray[i])
# Run summary text
file.write("\n")
file.write("======================================================================================================= \n")
file.write(" SUMMARY: \n")
file.write(" %s \n" % SummaryMessageIntro)
file.write(" Found: %s \n \n" % numOfOptimisations)
file.write("======================================================================================================= \n")
file.write(" Logged to %s \n" % notepadFilePath)
file.write("======================================================================================================= \n")
file.close()
os.startfile(notepadFilePath) # Trigger the notepad file to open
|
import os
from typing import Callable, Dict, List, Optional, Tuple
import unreal
import utils
from constants import (
ENGINE_MAJOR_VERSION,
ENGINE_MINOR_VERSION,
MATERIAL_PATHS,
PROJECT_ROOT,
RenderJobUnreal,
RenderPass,
SubSystem,
UnrealRenderLayerEnum,
)
# define the post process material to the render_pass
material_paths = MATERIAL_PATHS
material_path_keys = [key.lower() for key in material_paths.keys()]
class CustomMoviePipeline:
"""This class contains several class methods, which are used to control the movie
pipeline (Movie Render Queue), including:
- clearQueue: Clear the movie render queue.
- addJobToQueueWithYAMLConfig: Add a job to the movie render queue with yaml config.
- renderQueue: Render the jobs in movie render queue.
- ...
"""
subsystem = SubSystem.MoviePipelineQueueSub
pipeline_queue = subsystem.get_queue()
host = '127.0.0.1'
socket_port = int(os.environ.get('SOCKET_PORT', 9999))
jobs: List[str] = []
executor = None
@classmethod
def init_executor(cls):
if cls.executor is None:
cls.executor = unreal.MoviePipelinePIEExecutor()
cls.executor.connect_socket(cls.host, cls.socket_port)
cls.log_msg_with_socket('Socket of Unreal Renderer Connected')
@classmethod
def close_executor(cls):
if cls.executor is not None:
cls.executor.disconnect_socket()
cls.executor = None
@classmethod
def clear_queue(cls) -> None:
for job in cls.pipeline_queue.get_jobs():
unreal.log('Deleting job ' + job.job_name)
cls.pipeline_queue.delete_job(job)
cls.jobs.clear()
if cls.executor is not None:
unreal.log('Disconnecting socket')
cls.executor.disconnect_socket()
cls.executor = None
unreal.log('Queue cleared')
@classmethod
def save_queue(cls, path: str) -> None:
"""Save the movie render queue."""
unreal.MoviePipelineEditorLibrary.save_queue_to_manifest_file(cls.pipeline_queue)
manifest_file = PROJECT_ROOT / 'Saved/project/.utxt'
manifest_str = unreal.MoviePipelineEditorLibrary.convert_manifest_file_to_string(manifest_file.as_posix())
with open(path, 'w') as f:
f.write(manifest_str)
unreal.log(f"Saved queue to '{path}' as a manifest file.")
@classmethod
def get_job_length(cls) -> int:
return len(cls.pipeline_queue.get_jobs())
@staticmethod
def get_output_path(movie_preset: unreal.MoviePipelineMasterConfig) -> str:
"""Get output path from a movie preset.
Args:
movie_preset (unreal.MoviePipelineMasterConfig): The movie preset to get output path from.
Returns:
str: The output path.
"""
output_config: unreal.MoviePipelineOutputSetting = movie_preset.find_or_add_setting_by_class(
unreal.MoviePipelineOutputSetting
)
return output_config.output_directory.path
@staticmethod
def set_render_all_cameras(movie_preset: unreal.MoviePipelineMasterConfig, enable: bool = True) -> None:
camera_setting: unreal.MoviePipelineCameraSetting = movie_preset.find_or_add_setting_by_class(
unreal.MoviePipelineCameraSetting
)
camera_setting.render_all_cameras = enable
@staticmethod
def set_export_audio(movie_preset: unreal.MoviePipelineMasterConfig) -> None:
export_setting: unreal.MoviePipelineWaveOutput = movie_preset.find_or_add_setting_by_class(
unreal.MoviePipelineWaveOutput
)
@staticmethod
def add_render_passes(
movie_preset: unreal.MoviePipelineMasterConfig,
render_passes: List[RenderPass],
accumulator_includes_alpha: bool = False,
) -> None:
"""Add render passes to a movie preset.
Args:
movie_preset (unreal.MoviePipelineMasterConfig): The movie preset to add render passes to.
render_passes (List[RenderPass]): The render passes to add.
The available render passes are defined in `UnrealRenderLayerEnum`: `rgb`, `depth`, `mask`, \
`flow`, `diffuse`, `normal`, `metallic`, `roughness`, `specular`, `tangent`, `basecolor`
accumulator_includes_alpha (bool): Whether the accumulator includes alpha.
https://dev.epicgames.com/project/-us/unreal-engine/python-api/project/#unreal.MoviePipelineDeferredPassBase.accumulator_includes_alpha
"""
deferred_config = movie_preset.find_or_add_setting_by_class(unreal.CustomMoviePipelineDeferredPass)
render_pass_config = movie_preset.find_or_add_setting_by_class(unreal.CustomMoviePipelineOutput)
# set alpha
deferred_config.accumulator_includes_alpha = accumulator_includes_alpha
# add render passes
additional_render_passes = []
for render_pass in render_passes:
pass_name = render_pass.render_layer.value
enable = True
ext = getattr(unreal.CustomImageFormat, render_pass.image_format.value.upper()) # convert to unreal enum
if pass_name.lower() == UnrealRenderLayerEnum.img.value.lower():
render_pass_config.enable_render_pass_rgb = enable
render_pass_config.render_pass_name_rgb = pass_name
render_pass_config.extension_rgb = ext
elif pass_name.lower() in material_path_keys:
# material = unreal.SoftObjectPath(material_map[pass_name])
material = unreal.load_asset(material_paths[pass_name.lower()])
_pass = unreal.CustomMoviePipelineRenderPass(
enabled=enable, material=material, render_pass_name=pass_name, extension=ext
)
additional_render_passes.append(_pass)
render_pass_config.additional_render_passes = additional_render_passes
@staticmethod
def add_output_config(
movie_preset: unreal.MoviePipelineMasterConfig,
        resolution: Tuple[int, int] = (1920, 1080),
file_name_format: Optional[str] = None,
output_path: Optional[str] = None,
) -> None:
"""Add output config to a movie preset.
Args:
movie_preset (unreal.MoviePipelineMasterConfig): The movie preset to add output config to.
resolution (list): Resolution of the output, e.g. [1920, 1080].
file_name_format (str): Format of the output file name, e.g. '{sequence_name}/{render_pass}/{frame_number}'
output_path (str): Path of the output, e.g. 'E:/output'
"""
# find or add setting
output_config = movie_preset.find_or_add_setting_by_class(unreal.MoviePipelineOutputSetting)
# add resolution settings
output_config.output_resolution = unreal.IntPoint(resolution[0], resolution[1])
# add file name format settings
if file_name_format:
output_config.file_name_format = file_name_format
# set output path
if output_path is not None:
output_config.output_directory = unreal.DirectoryPath(output_path)
@staticmethod
def add_console_command(
movie_preset: unreal.MoviePipelineMasterConfig,
console_variables: Dict[str, float] = {'r.MotionBlurQuality': 0.0},
) -> None:
"""Add console command to a movie preset. Now only support motion blur.
Args:
movie_preset (unreal.MoviePipelineMasterConfig): The movie preset to add console command to.
console_variables (dict): Console variables.
"""
# find or add setting
console_config: unreal.MoviePipelineConsoleVariableSetting = movie_preset.find_or_add_setting_by_class(
unreal.MoviePipelineConsoleVariableSetting
)
if ENGINE_MAJOR_VERSION >= 5 and ENGINE_MINOR_VERSION >= 2:
for key, value in console_variables.items():
console_config.add_or_update_console_variable(key, value)
else:
console_config.console_variables = console_variables
@staticmethod
def add_anti_alias(
movie_preset: unreal.MoviePipelineMasterConfig,
anti_alias: RenderJobUnreal.AntiAliasSetting = RenderJobUnreal.AntiAliasSetting(),
) -> None:
"""Add anti-alias settings to a movie preset.
Args:
movie_preset (unreal.MoviePipelineMasterConfig): The movie preset to add anti-alias settings to.
anti_alias (dict): Anti-alias settings.
"""
if not anti_alias.enable:
return
# add anti_alias settings
anti_alias_config: unreal.MoviePipelineAntiAliasingSetting = movie_preset.find_or_add_setting_by_class(
unreal.MoviePipelineAntiAliasingSetting
)
anti_alias_config.spatial_sample_count = anti_alias.spatial_samples
anti_alias_config.temporal_sample_count = anti_alias.temporal_samples
if anti_alias.override_anti_aliasing:
anti_alias_config.override_anti_aliasing = True
if anti_alias.warmup_frames:
anti_alias_config.use_camera_cut_for_warm_up = True
anti_alias_config.render_warm_up_count = anti_alias.warmup_frames
if anti_alias.render_warmup_frame:
anti_alias_config.render_warm_up_frames = True
@classmethod
def add_settings_to_movie_preset(
cls,
movie_preset: unreal.MoviePipelineMasterConfig,
render_passes: List[RenderPass],
        resolution: Tuple[int, int] = (1920, 1080),
file_name_format: Optional[str] = None,
output_path: Optional[str] = None,
anti_alias: RenderJobUnreal.AntiAliasSetting = RenderJobUnreal.AntiAliasSetting(),
console_variables: dict = {'r.MotionBlurQuality': 0.0},
) -> unreal.MoviePipelineMasterConfig:
"""Add settings to a movie preset.
Args:
movie_preset (unreal.MoviePipelineMasterConfig): The movie preset to add settings to.
render_passes (list): definition of render passes.
resolution (list): Resolution of the output, e.g. [1920, 1080].
file_name_format (str): Format of the output file name, e.g. '{sequence_name}/{render_pass}/{frame_number}'
output_path (str): Path of the output, e.g. 'E:/output'
            anti_alias (RenderJobUnreal.AntiAliasSetting): Anti-alias settings.
console_variables (dict): Console variables.
Returns:
unreal.MoviePipelineMasterConfig: The created movie preset.
"""
cls.add_render_passes(movie_preset, render_passes)
cls.add_output_config(movie_preset, resolution, file_name_format, output_path)
cls.add_anti_alias(movie_preset, anti_alias)
cls.add_console_command(movie_preset, console_variables)
unreal.EditorAssetLibrary.save_loaded_asset(movie_preset)
return movie_preset
@classmethod
def create_movie_preset(
cls,
render_passes: List[RenderPass],
        resolution: Tuple[int, int] = (1920, 1080),
file_name_format: Optional[str] = None,
output_path: Optional[str] = None,
anti_alias: RenderJobUnreal.AntiAliasSetting = RenderJobUnreal.AntiAliasSetting(),
console_variables: Dict[str, float] = {'r.MotionBlurQuality': 0.0},
export_audio: bool = False,
export_transparent: bool = False,
) -> unreal.MoviePipelineMasterConfig:
"""
Create a movie preset from args.
1. Add render passes which contains the name of the render pass, and the extension of the output.
2. Set output config which contains the resolution, the file name format, and the output path.
3. Add anti-alias settings.
4. Add console command, which contains the console variables like motion blur. (https://docs.unrealengine.com/5.2/en-US/rendering-high-quality-frames-with-movie-render-queue-in-unreal-engine/#step7:configuretheconsolevariables)
5. Set render all cameras to True, which means multi-view rendering simultaneously.
Args:
render_passes (list): definition of render passes.
resolution (list): Resolution of the output, e.g. [1920, 1080].
file_name_format (str): Format of the output file name, e.g. '{sequence_name}/{render_pass}/{frame_number}'
output_path (str): Path of the output, e.g. 'E:/output'
            anti_alias (RenderJobUnreal.AntiAliasSetting): Anti-alias settings.
            console_variables (dict): Console variables.
export_audio (bool): Whether to export audio.
export_transparent (bool): Whether to render with transparent background.
Returns:
unreal.MoviePipelineMasterConfig: The created movie preset.
"""
movie_preset = unreal.MoviePipelineMasterConfig()
cls.add_render_passes(movie_preset, render_passes, accumulator_includes_alpha=export_transparent)
cls.add_output_config(movie_preset, resolution, file_name_format, output_path)
cls.add_anti_alias(movie_preset, anti_alias)
cls.add_console_command(movie_preset, console_variables)
cls.set_render_all_cameras(movie_preset, enable=True)
if export_audio:
cls.set_export_audio(movie_preset)
return movie_preset
@classmethod
def create_job(
cls,
level: str,
level_sequence: str,
) -> unreal.MoviePipelineExecutorJob:
# check if assets exist
if not (
unreal.EditorAssetLibrary.does_asset_exist(level)
and unreal.EditorAssetLibrary.does_asset_exist(level_sequence)
):
return False
# Create a new job and add it to the queue.
new_job = cls.pipeline_queue.allocate_new_job(unreal.MoviePipelineExecutorJob)
# TODO: if failed, new_job = False. Need to handle this case.
new_job.job_name = str(level_sequence.rsplit('/', 1)[-1])
new_job.map = utils.get_soft_object_path(level)
new_job.sequence = utils.get_soft_object_path(level_sequence)
return new_job
@classmethod
def add_job_to_queue_with_preset(
cls,
level: str,
level_sequence: str,
config: str,
) -> bool:
"""Add a job to the queue with a pre-defined preset.
Args:
level (str): level path in unreal engine.
level_sequence (str): level sequence path in unreal engine.
config (str): pre-defined preset path in unreal engine.
Returns:
bool: success or not.
"""
new_job = cls.create_job(level, level_sequence)
if not new_job:
return False
movie_preset = unreal.load_asset(config)
new_job.set_configuration(movie_preset)
unreal.log(f'Added new job ({new_job.job_name}) to queue')
return True
@classmethod
def log_msg_with_socket(
cls,
msg: str,
executor: Optional[unreal.MoviePipelinePIEExecutor] = None,
):
"""Log message with socket."""
if executor is None:
if cls.executor is None:
cls.init_executor()
executor = cls.executor
executor.send_socket_message(msg)
unreal.log(msg)
@classmethod
def add_job_to_queue(cls, job: RenderJobUnreal) -> bool:
"""Add a job to the queue.
Args:
job (RenderJobUnreal): a render job.
Returns:
bool: success or not.
"""
new_job = cls.create_job(job.map_path, job.sequence_path)
if not new_job:
return False
movie_preset = cls.create_movie_preset(
render_passes=job.render_passes,
resolution=job.resolution,
file_name_format=job.file_name_format,
output_path=job.output_path,
anti_alias=job.anti_aliasing,
console_variables=job.console_variables,
export_audio=job.export_audio,
export_transparent=job.export_transparent,
)
new_job.set_configuration(movie_preset)
unreal.log(f'Added new job ({new_job.job_name}) to queue')
return True
@classmethod
def add_job_to_queue_with_render_config(
cls,
level: str,
level_sequence: str,
render_config: Optional[dict] = None,
) -> bool:
"""Add a job to the queue with a YAML config loaded from a file.
Args:
level (str): level path in unreal engine.
level_sequence (str): level sequence path in unreal engine.
render_config (dict): YAML config loaded from a file. You can
find a template in `data/render_config.yaml`.
Returns:
bool: success or not.
Examples:
>>> render_config = {
'Resolution': [1920, 1080],
'Output_Path': 'E:/project/',
'File_Name_Format': '{sequence_name}/{render_pass}/{frame_number}',
'Console_Variables': {'r.MotionBlurQuality': 0.0},
'Anti_Alias': {'enable': False, 'spatial_samples': 8, 'temporal_samples': 8},
'Render_Passes': [
{'pass_name': 'img', 'enable': True, 'ext': 'jpeg'},
{'pass_name': 'depth', 'enable': True, 'ext': 'exr'},
{'pass_name': 'mask', 'enable': True, 'ext': 'exr'},
{'pass_name': 'flow', 'enable': True, 'ext': 'exr'},
{'pass_name': 'normal', 'enable': True, 'ext': 'png'}
]
}
>>> # or you can use the following code to load the config from a file
>>> from data.config import CfgNode
>>> render_config = CfgNode.load_yaml_with_base(PLUGIN_ROOT / 'misc/render_config_common.yaml')
>>> #
>>> CustomMoviePipeline.add_job_to_queue_with_render_config(
level='/project/',
level_sequence='/project/',
render_config=render_config
)
"""
        new_job = cls.create_job(level, level_sequence)
        if not new_job:
return False
if render_config is None:
render_config = cls.render_config
movie_preset = cls.create_movie_preset(
render_passes=render_config['Render_Passes'],
resolution=render_config['Resolution'],
file_name_format=render_config['File_Name_Format'],
output_path=render_config['Output_Path'],
anti_alias=render_config['Anti_Alias'],
console_variables=render_config['Console_Variables'],
)
        new_job.set_configuration(movie_preset)
        unreal.log(f'Added new job ({new_job.job_name}) to queue')
return True
def onQueueFinishedCallback(executor: unreal.MoviePipelineLinearExecutorBase, success: bool):
"""On queue finished callback. This is called when the queue finishes. The args
are the executor and the success of the queue, and it cannot be modified.
Args:
executor (unreal.MoviePipelineLinearExecutorBase): The executor of the queue.
success (bool): Whether the queue finished successfully.
"""
# TODO: bug fix
mss = f' Render completed. Success: {success}'
CustomMoviePipeline.log_msg_with_socket(mss, executor)
def onIndividualJobFinishedCallback(inJob: unreal.MoviePipelineExecutorJob, success: bool):
"""On individual job finished callback. This is called when an individual job
finishes. The args are the job and the success of the job, and it cannot be
modified.
Args:
inJob (unreal.MoviePipelineExecutorJob): The job that finished.
success (bool): Whether the job finished successfully.
"""
# get class variable `Jobs` to get the index of the finished job
jobs = CustomMoviePipeline.jobs
job_name = inJob.job_name
job_index = jobs.index(job_name) + 1
output_path = CustomMoviePipeline.get_output_path(inJob.get_configuration())
# XXX: for no reason (only in here), the first 4 characters of mss cannot be sent
mss = f' job rendered ({job_index}/{len(jobs)}): seq_name="{job_name}", saved to "{output_path}/{job_name}"'
CustomMoviePipeline.log_msg_with_socket(mss, CustomMoviePipeline.executor)
@classmethod
def render_queue(
cls,
queue_finished_callback: Callable = onQueueFinishedCallback,
individual_job_finished_callback: Callable = onIndividualJobFinishedCallback,
) -> None:
"""Render the queue. This will render the queue. You can pass a callback
function to be called when a job or the queue finishes. You can also pass a
custom executor to use, or the default one will be used.
Args:
queue_finished_callback (Callable): The callback function to be called when the queue finishes.
individual_job_finished_callback (Callable): The callback function to be called when an individual job finishes.
executor (unreal.MoviePipelineLinearExecutorBase): The custom executor to use, or the default one will be used.
"""
# check if there's jobs added to the queue
if len(cls.pipeline_queue.get_jobs()) == 0:
unreal.log_error('Open the Window > Movie Render Queue and add at least one job to use this example')
return
# add job_name to class variable `Jobs` for monitoring progress in the individual job finished callback
cls.jobs.clear()
for job in cls.pipeline_queue.get_jobs():
cls.jobs.append(job.job_name)
# set callbacks
if cls.executor is None:
cls.init_executor()
cls.executor.on_executor_finished_delegate.add_callable_unique(queue_finished_callback)
cls.executor.on_individual_job_finished_delegate.add_callable_unique(individual_job_finished_callback)
# render the queue
cls.executor.execute(cls.pipeline_queue)
cls.log_msg_with_socket('Start Render')
def main():
CustomMoviePipeline.clear_queue()
CustomMoviePipeline.add_job_to_queue(
RenderJobUnreal(
map_path='/project/',
sequence_path='/project/',
resolution=[1920, 1080],
output_path='E:/project/',
file_name_format='{sequence_name}/{render_pass}/{camera_name}/{frame_number}',
console_variables={'r.MotionBlurQuality': 0.0},
anti_alias={'enable': False},
)
)
CustomMoviePipeline.render_queue()
if __name__ == '__main__':
# from utils import loadRegistry
# registry = loadRegistry(RenderQueue)
# registry.register()
main()
|
import unreal
import argparse
# Info about Python with IKRigs here:
# https://docs.unrealengine.com/5.2/en-US/using-python-to-create-and-edit-ik-rigs-in-unreal-engine/
# https://docs.unrealengine.com/5.2/en-US/using-python-to-create-and-edit-ik-retargeter-assets-in-unreal-engine/
parser = argparse.ArgumentParser(description = 'Creates an IKRig given a SkeletalMesh')
parser.add_argument('--skeletalMesh', help='Skeletal Mesh to Use')
args = parser.parse_args()
# Find or Create the IKRig
asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
package_path = args.skeletalMesh.rsplit('/', 1)[0] + '/'
asset_name = args.skeletalMesh.split('.')[-1] + '_IKRig'
ikr = unreal.load_asset(name=package_path + asset_name)
if not ikr:
ikr = asset_tools.create_asset( asset_name=asset_name,
package_path=package_path,
asset_class=unreal.IKRigDefinition,
factory=unreal.IKRigDefinitionFactory())
# Get the controller
ikr_controller = unreal.IKRigController.get_controller(ikr)
# Set the skeletal mesh
skel_mesh = unreal.load_asset(name = args.skeletalMesh)
ikr_controller.set_skeletal_mesh(skel_mesh)
# Get the bone list
bone_names = unreal.DazToUnrealBlueprintUtils.get_bone_list(ikr)
# Use bone list to guess character type
character_type = "unknown"
if all(x in bone_names for x in ['hip', 'pelvis', 'spine1', 'spine4']): character_type = 'Genesis9'
if all(x in bone_names for x in ['hip', 'abdomenLower', 'abdomenUpper', 'chestUpper']): character_type = 'Genesis8'
if all(x in bone_names for x in ['hip', 'abdomenLower', 'abdomenUpper', 'chestUpper', 'lHeel']): character_type = 'Genesis3'
if all(x in bone_names for x in ['Hips', 'HeadTop_End']): character_type = 'Mixamo'
if character_type != "unknown":
# Setup goal names
chains = []
CHAIN_NAME = 0
CHAIN_START = 1
CHAIN_END = 2
CHAIN_GOAL = 3
# Define the retarget chains based on the character
if character_type == 'Genesis9':
retarget_root_name = 'hip'
chains.append(['Spine', 'spine1', 'spine4', None])
chains.append(['Head', 'neck1', 'head', None])
chains.append(['LeftArm', 'l_upperarm', 'l_hand', 'l_hand_Goal'])
chains.append(['RightArm', 'r_upperarm', 'r_hand', 'r_hand_Goal'])
chains.append(['LeftClavicle', 'l_shoulder', 'l_shoulder', None])
chains.append(['RightClavicle', 'r_shoulder', 'r_shoulder', None])
chains.append(['LeftLeg', 'l_thigh', 'l_toes', 'l_toes_Goal'])
chains.append(['RightLeg', 'r_thigh', 'r_toes', 'r_toes_Goal'])
chains.append(['LeftPinky', 'l_pinky1', 'l_pinky3', None])
chains.append(['RightPinky', 'r_pinky1', 'r_pinky3', None])
chains.append(['LeftRing', 'l_ring1', 'l_ring3', None])
chains.append(['RightRing', 'r_ring1', 'r_ring3', None])
chains.append(['LeftMiddle', 'l_mid1', 'l_mid3', None])
chains.append(['RightMiddle', 'r_mid1', 'r_mid3', None])
chains.append(['LeftIndex', 'l_index1', 'l_index3', None])
chains.append(['RightIndex', 'r_index1', 'r_index3', None])
chains.append(['LeftThumb', 'l_thumb1', 'l_thumb3', None])
chains.append(['RightThumb', 'r_thumb1', 'r_thumb3', None])
chains.append(['HandRootIK', 'ik_hand_root', 'ik_hand_root', None])
chains.append(['RightHandIK', 'ik_hand_r', 'ik_hand_r', None])
chains.append(['HandGunIK', 'ik_hand_gun', 'ik_hand_gun', None])
chains.append(['FootRootIK', 'ik_foot_root', 'ik_foot_root', None])
chains.append(['LeftFootIK', 'ik_foot_l', 'ik_foot_l', None])
chains.append(['RightFootIK', 'ik_foot_r', 'ik_foot_r', None])
chains.append(['Root', 'root', 'root', None])
if character_type == 'Genesis8' or character_type == 'Genesis3':
retarget_root_name = 'hip'
chains.append(['Spine', 'abdomenLower', 'chestUpper', None])
chains.append(['Head', 'neckLower', 'head', None])
chains.append(['LeftArm', 'lShldrBend', 'lHand', 'lHand_Goal'])
chains.append(['RightArm', 'rShldrBend', 'rHand', 'rHand_Goal'])
chains.append(['LeftClavicle', 'lCollar', 'lCollar', None])
chains.append(['RightClavicle', 'rCollar', 'rCollar', None])
chains.append(['LeftLeg', 'lThighBend', 'lToe', 'lToe_Goal'])
chains.append(['RightLeg', 'rThighBend', 'rToe', 'rToe_Goal'])
chains.append(['LeftPinky', 'lPinky1', 'lPinky3', None])
chains.append(['RightPinky', 'rPinky1', 'rPinky3', None])
chains.append(['LeftRing', 'lRing1', 'lRing3', None])
chains.append(['RightRing', 'rRing1', 'rRing3', None])
chains.append(['LeftMiddle', 'lMid1', 'lMid3', None])
chains.append(['RightMiddle', 'rMid1', 'rMid3', None])
chains.append(['LeftIndex', 'lIndex1', 'lIndex3', None])
chains.append(['RightIndex', 'rIndex1', 'rIndex3', None])
chains.append(['LeftThumb', 'lThumb1', 'lThumb3', None])
chains.append(['RightThumb', 'rThumb1', 'rThumb3', None])
chains.append(['HandRootIK', 'ik_hand_root', 'ik_hand_root', None])
chains.append(['RightHandIK', 'ik_hand_r', 'ik_hand_r', None])
chains.append(['HandGunIK', 'ik_hand_gun', 'ik_hand_gun', None])
chains.append(['FootRootIK', 'ik_foot_root', 'ik_foot_root', None])
chains.append(['LeftFootIK', 'ik_foot_l', 'ik_foot_l', None])
chains.append(['RightFootIK', 'ik_foot_r', 'ik_foot_r', None])
chains.append(['Root', 'root', 'root', None])
if character_type == 'Mixamo':
retarget_root_name = 'Hips'
chains.append(['Spine', 'Spine', 'Spine2', None])
chains.append(['Head', 'Neck', 'Head', None])
chains.append(['LeftArm', 'LeftArm', 'LeftHand', 'LeftHand_Goal'])
chains.append(['RightArm', 'RightArm', 'RightHand', 'RightHand_Goal'])
chains.append(['LeftClavicle', 'LeftShoulder', 'LeftShoulder', None])
chains.append(['RightClavicle', 'RightShoulder', 'RightShoulder', None])
chains.append(['LeftLeg', 'LeftUpLeg', 'LeftToeBase', 'LeftToeBase_Goal'])
chains.append(['RightLeg', 'RightUpLeg', 'RightToeBase', 'RightToeBase_Goal'])
chains.append(['LeftPinky', 'LeftHandPinky1', 'LeftHandPinky3', None])
chains.append(['RightPinky', 'RightHandPinky1', 'RightHandPinky3', None])
chains.append(['LeftRing', 'LeftHandRing1', 'LeftHandRing3', None])
chains.append(['RightRing', 'RightHandRing1', 'RightHandRing3', None])
chains.append(['LeftMiddle', 'LeftHandMiddle1', 'LeftHandMiddle3', None])
chains.append(['RightMiddle', 'RightHandMiddle1', 'RightHandMiddle3', None])
chains.append(['LeftIndex', 'LeftHandIndex1', 'LeftHandIndex3', None])
chains.append(['RightIndex', 'RightHandIndex1', 'RightHandIndex3', None])
chains.append(['LeftThumb', 'LeftHandThumb1', 'LeftHandThumb3', None])
chains.append(['RightThumb', 'RightHandThumb1', 'RightHandThumb3', None])
# Create the solver, remove any existing ones
while ikr_controller.get_solver_at_index(0):
ikr_controller.remove_solver(0)
fbik_index = ikr_controller.add_solver(unreal.IKRigFBIKSolver)
# Setup root
ikr_controller.set_root_bone(retarget_root_name, fbik_index)
ikr_controller.set_retarget_root(retarget_root_name)
# Remove existing goals
for goal in ikr_controller.get_all_goals():
ikr_controller.remove_goal(goal.get_editor_property('goal_name'))
# Create the goals
for chain in chains:
if chain[CHAIN_GOAL]:
ikr_controller.add_new_goal(chain[CHAIN_GOAL], chain[CHAIN_END])
ikr_controller.connect_goal_to_solver(chain[CHAIN_GOAL], fbik_index)
# Remove old Retarget Chains
for chain in ikr_controller.get_retarget_chains():
ikr_controller.remove_retarget_chain(chain.get_editor_property('chain_name'))
# Setup Retarget Chains
for chain in chains:
if chain[CHAIN_GOAL]:
ikr_controller.add_retarget_chain(chain[CHAIN_NAME], chain[CHAIN_START], chain[CHAIN_END], chain[CHAIN_GOAL])
else:
ikr_controller.add_retarget_chain(chain[CHAIN_NAME], chain[CHAIN_START], chain[CHAIN_END], '')
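# Usage sketch (hedged): assuming this script is saved as CreateIKRig.py inside
# the project, it can be run from the editor's Python console with a skeletal
# mesh object path, e.g.
#
#   py CreateIKRig.py --skeletalMesh /Game/Characters/Hero/SK_Hero.SK_Hero
#
# The IKRig asset is then created (or reused) next to the skeletal mesh as
# <MeshName>_IKRig. The example path above is illustrative.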
|
import unreal
from downsize import step0_settings as settings
def remap_uepath_to_filepath(uepath: str) -> str:  # convert an Unreal Engine path to a file path
'''
## Description: Remap the Unreal Engine path to the file path
'''
projectPath = unreal.Paths.project_dir()
#print(projectPath)
filepath = uepath.replace('/Game/', projectPath + 'Content/')
name = filepath.rsplit('.', 1)[0]
name = name + '.uasset'
print(name)
return name
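# Example of the mapping performed above (the texture path is illustrative):
#   remap_uepath_to_filepath('/Game/Textures/T_Rock.T_Rock')
#   -> '<ProjectDir>Content/Textures/T_Rock.uasset'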
def reimport_texture(tex_ue_file_path: str, file_path: str):
textureFactory = unreal.ReimportTextureFactory()
importTask = unreal.AssetImportTask()
importTask.automated = True
importTask.filename = file_path
length = len(tex_ue_file_path.split('/'))
destination_name = tex_ue_file_path.split('/')[length-1].split('.')[0]
destination_path = tex_ue_file_path.rsplit('/', 1)[0] + '/'
importTask.destination_name = destination_name
importTask.destination_path = destination_path
print('destination_name ', destination_name)
importTask.replace_existing = True
importTask.save = True
importTask.factory = textureFactory
executeImportTask(importTask)
return True
def executeImportTask(task):
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
return True
selectedAssets = unreal.EditorUtilityLibrary.get_selected_assets()
# drive path where the source images are stored
texture_folder = settings.texture_folder
source_drive = settings.source_folder
desired_size = settings.desired_size
for asset in selectedAssets:
tex_asset: unreal.Texture2D = asset
tex_width = tex_asset.blueprint_get_size_x()
if tex_width > desired_size :
tex_path = tex_asset.get_path_name()
tex_ue_file_path = remap_uepath_to_filepath(tex_path)
import_info: unreal.AssetImportData = tex_asset.get_editor_property('asset_import_data')
are_sources = len(import_info.extract_filenames()) > 0
source_file = import_info.get_first_filename()
has_source = len(source_file) != 0
new_tex_path = remap_uepath_to_filepath(tex_path).replace(source_drive, texture_folder)
file_path = tex_ue_file_path.replace(source_drive, texture_folder).replace('.uasset','.PNG')
print('tex_path: ', tex_path)
reimport_texture(tex_path, file_path)
unreal.EditorAssetLibrary.save_asset(tex_asset.get_path_name())
|
import unreal
import math
import json
import pprint
import datetime
import os
import csv
import uuid
from enum import Enum
from typing import Any, List, Optional, Dict, TypeVar, Type, Callable, cast
# Function to load a CSV file and convert it into a grid of numbers
def load_csv(file_path):
grid = []
with open(file_path, 'r') as file:
reader = csv.reader(file)
for row in reader:
grid_row = []
for cell in row:
                if cell.strip() == '':  # If the cell is empty, append 0
                    grid_row.append(0)
                else:  # Otherwise, convert the value to an integer
grid_row.append(int(cell))
grid.append(grid_row)
return grid
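# Example (illustrative): a Collisions.csv row of "1,,1" becomes [1, 0, 1],
# i.e. empty cells turn into 0 and every other cell is parsed as an int.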
# Function to create a collision component (BoxComponent) on an actor
def create_collision(actor: unreal.PaperSpriteActor, x, y, tile_size):
    initial_children_count = actor.root_component.get_num_children_components()  # Count the existing child components
    # Get the subsystem used to manipulate subobjects (components) in Unreal
subsystem = unreal.get_engine_subsystem(unreal.SubobjectDataSubsystem)
root_data_handle = subsystem.k2_gather_subobject_data_for_instance(actor)[0]
    # Create a new collision component (BoxComponent)
collision_component = unreal.BoxComponent()
sub_handle, _ = subsystem.add_new_subobject(params=unreal.AddNewSubobjectParams(parent_handle=root_data_handle, new_class=collision_component.get_class()))
subsystem.rename_subobject(handle=sub_handle, new_name=unreal.Text(f"LDTK_Collision_{uuid.uuid4()}"))
    # Get the newly added component
    new_component: unreal.BoxComponent = actor.root_component.get_child_component(initial_children_count)
    # Configure the collision component's size, position, and rotation
new_component.set_box_extent(unreal.Vector(tile_size / 2, tile_size / 2, 64))
new_component.set_relative_location_and_rotation(unreal.Vector((x + (tile_size / 2)), -32, -(y + (tile_size / 2))), unreal.Rotator(90, 0, 0),False, False)
    new_component.set_collision_profile_name("BlockAll")  # Set the collision profile to "BlockAll"
# Function to create collisions based on a grid
def spawn_collisions_from_grid(grid, actor: unreal.PaperSpriteActor, composite_width, composite_height):
    tile_size = 16  # Size of each tile
    for row_index, row in enumerate(grid):  # Iterate over the rows of the grid
        for col_index, cell in enumerate(row):  # Iterate over the cells of the row
            x = (col_index * tile_size) - (composite_width / 2)  # Compute the X position
            y = row_index * tile_size - (composite_height / 2)  # Compute the Y position
            if cell == 1:  # If the cell is 1, create a collision
create_collision(actor, x, y, tile_size)
# Function to find all subfolders in a directory
def find_all_subfolders(path):
    subfolders = []
    for root, dirs, files in os.walk(path):  # Walk the directory
for dir in dirs:
            subfolders.append(os.path.join(root, dir))  # Add the subfolder path to the list
    return subfolders
# Dictionary type used to store the contents of a directory
DirectoryContents = Dict[str, Dict[str, Any]]
# Function to get the contents of a directory, filtering for specific files
def get_directory_contents(path: str) -> dict:
directory_contents = {}
    for root, _, files in os.walk(path):  # Walk the directory
        root = os.path.normpath(root)
        # Filter files with specific extensions
filtered_files = [file for file in files if file.endswith(('_bg.png', '_composite.png', 'Bg_textures.png', 'Collisions.csv', 'Collisions.png', 'Collisions-int.png', 'data.json', 'Wall_shadows.png'))]
if filtered_files:
            directory_contents[root] = {file: None for file in filtered_files}  # Store the filtered files
    return directory_contents
# Main function to import an LDtk world (level) into Unreal Engine
def importWorld(folder_name: str):
    level_files_location = "LdtkFiles/simplified"  # Relative path to the LDtk files
    base_directory = "/Game"  # Unreal Engine base directory
    ldtk_files_directory = "LdtkFiles"  # LDtk files directory
    ldtk_simplified_directory = "simplified"  # Simplified subdirectory
    composite_filename = "_composite"  # Name of the composite texture file
    data_filename = "data.json"  # Name of the JSON data file
    collisions_filename = "Collisions.csv"  # Name of the CSV collision file
    if len(str(folder_name)) == 0:  # Check that a folder name was provided
print("Unreal LDtk: No folder name provided. Exiting...")
return
else:
folder_name = str(folder_name)
    # Build the full path to the level directory
base_path = os.path.join(base_directory, ldtk_files_directory, folder_name, ldtk_simplified_directory)
content_directory = unreal.Paths.project_content_dir()
level_directory = os.path.join(content_directory, ldtk_files_directory, folder_name, ldtk_simplified_directory).replace("\\", "/")
    directories = find_all_subfolders(level_directory)  # Find all the subfolders
    if len(directories) > 0:  # Check whether any subfolders were found
print(f"Unreal LDtk: Found {len(directories)} directories in {level_directory}. Beginning import...")
else:
print(f"Unreal LDtk: No directories found in {level_directory}. \nThis might be because you are missing the LdtkFiles directory, or that the folder level name is wrong. Exiting...")
return
    entity_index_counter = 0  # Counter used to index entities
    # Iterate over each subfolder (level)
for index, directory in enumerate(directories):
_, directory_name = os.path.split(directory)
full_path_composite = os.path.join(base_path, directory_name, composite_filename)
full_path_data = os.path.join(level_directory, directory_name, data_filename).replace("\\", "/")
full_path_collisions = os.path.join(level_directory, directory_name, collisions_filename).replace("\\", "/")
        # Check that the required files exist
composite_exists = unreal.EditorAssetLibrary.does_asset_exist(full_path_composite)
data_exists = os.path.exists(full_path_data)
collisions_exists = os.path.exists(full_path_collisions)
        ## Creating the Sprite ##
        if composite_exists:
            composite_texture = load_texture_asset(full_path_composite)  # Load the composite texture
            composite_sprite = create_sprite_from_texture(composite_texture, directory_name)  # Create a sprite from the texture
else:
print(f"Unreal LDtk: Missing composite texture asset, skipping...")
        ## Reading the JSON file ##
        if data_exists:
            data_file = open(full_path_data)
            data = json.load(data_file)  # Load the JSON data
            data_file.close()
            composite_spawn_coords = (data['x'] + (data['width'] / 2), data['y'] + (data['height'] / 2), 0)  # Compute the spawn coordinates
else:
print(f"Unreal LDtk: Missing data.json file, skipping...")
        # If the composite texture and the data exist, spawn the actor and the entities
        if (composite_exists and data_exists):
            spawned_composite_actor = spawn_sprite_in_world(composite_sprite, (composite_spawn_coords))
            ## Spawning Entities ##
for _, entities in data['entities'].items():
for index, entity in enumerate(entities):
spawn_entity_in_world(f"LDtk_{entity['id']}_{entity_index_counter}", data['x'] + entity['x'], data['y'] + entity['y'])
entity_index_counter += 1
else:
print(f"Unreal LDtk: Missing composite and/or data.json file, skipping entities...")
        ## Spawning Collisions ##
        if composite_exists and collisions_exists:
            grid = load_csv(full_path_collisions)  # Load the collision grid from the CSV
            spawn_collisions_from_grid(grid, spawned_composite_actor, data['width'], data['height'])  # Create the collisions
        else:
            print(f"Unreal LDtk: Missing Composite and/or Collisions.csv file, skipping collisions...")
# Function to check for and delete an existing sprite
def check_and_delete_existing_sprite(sprite_name):
sprite_path = "/project/" + sprite_name
all_actors = unreal.EditorLevelLibrary.get_all_level_actors()
for actor in all_actors:
if actor.get_actor_label() == sprite_name:
unreal.EditorLevelLibrary.destroy_actor(actor)
print(f"Deleting existing composite sprite: {actor}")
break
if unreal.EditorAssetLibrary.does_asset_exist(sprite_path):
unreal.EditorAssetLibrary.delete_asset(sprite_path)
# Function to check for and delete an existing entity
def check_and_delete_existing_entity(entity_name):
all_actors = unreal.EditorLevelLibrary.get_all_level_actors()
for actor in all_actors:
if actor.get_actor_label() == entity_name:
unreal.EditorLevelLibrary.destroy_actor(actor)
print(f"Deleting existing entity: {actor}")
break
# Function to load a texture asset from Unreal Engine
def load_texture_asset(texture_path):
    texture = unreal.EditorAssetLibrary.load_asset(texture_path)
    return texture
# Function to create a sprite from a texture
def create_sprite_from_texture(texture_asset: unreal.Texture2D, world_name):
try:
sprite_path = "/project/"
sprite_name = f"LDtk_{world_name}_{texture_asset.get_name()}_sprite"
        check_and_delete_existing_sprite(sprite_name=sprite_name)  # Check for and delete any existing sprite
        # Create a new sprite in Unreal Engine
        sprite_package = unreal.AssetToolsHelpers.get_asset_tools().create_asset(asset_name=sprite_name, package_path=sprite_path, asset_class=unreal.PaperSprite, factory=unreal.PaperSpriteFactory())
        sprite_package.set_editor_property("source_texture", texture_asset)  # Set the sprite's source texture
print("Sprite saved at: ", sprite_path)
return sprite_package
    except Exception as e:
        print(f"Unreal LDtk: Failed to create sprite from texture: {e}")
# Function to spawn an entity in the world
def spawn_entity_in_world(name, x, y):
    location = unreal.Vector(x, 1, -y)  # Set the entity's position
    check_and_delete_existing_entity(name)  # Check for and delete any existing entity
    # Spawn the actor in the world
actor: unreal.Actor = unreal.EditorLevelLibrary.spawn_actor_from_class(unreal.Actor().get_class(), location)
if actor:
        actor.set_actor_label(name)  # Set the actor's display name
print(f"Spawning entity: {actor.get_actor_label()}")
return actor
# Function to spawn a sprite in the world
def spawn_sprite_in_world(sprite, location=(0, 0, 0), scale=(1, 1, 1)):
    spawn_location = unreal.Vector(location[0], location[2], -location[1])  # Set the spawn position
    scale_vector = unreal.Vector(scale[0], scale[1], scale[2])  # Set the scale
    actor_transform = unreal.Transform(spawn_location, unreal.Rotator(0, 0, 0), scale_vector)  # Build the transform
    # Spawn the actor in the world
actor = unreal.EditorLevelLibrary.spawn_actor_from_object(sprite, spawn_location)
if actor:
sprite_component = actor.render_component
if sprite_component:
            sprite_component.set_sprite(sprite)  # Set the sprite
        actor.set_actor_scale3d(scale_vector)  # Set the scale
        actor.set_actor_transform(actor_transform, False, True)  # Apply the transform
print(f"Spawning composite sprite: {actor.get_actor_label()}")
return actor
return None
# Run the main function to import the world
# (folder_name is expected to be defined by the caller before this script runs)
importWorld(folder_name)
# Print the current date and time
print(datetime.datetime.now())
|
import unreal
import tempfile
import os
import csv
from ..common.utils import get_sheet_data
SHEET_ID = "1pJmY-9qeM85mW0X69SqVfck-YENJKnh-y-vrg_VN5BQ"
GID = "866191023"
TABLE_PATH = "/project/"
def create_target_data_list(data, row):
"""TargetDataList 문자열 생성"""
# 첫 번째 TargetDataList
allowed_types1 = str(row.get('허용타입', '0')).strip()
required_prop_class_tag1 = str(row.get('필수클래스태그', '')).strip()
required_prop_feature_tag1 = str(row.get('필수기능태그', '')).strip()
target_description1 = str(row.get('대상설명', '')).strip()
    # Second TargetDataList entry
allowed_types2 = str(row.get('허용타입2', '0')).strip()
required_prop_class_tag2 = str(row.get('필수클래스태그2', '')).strip()
required_prop_feature_tag2 = str(row.get('필수기능태그2', '')).strip()
target_description2 = str(row.get('대상설명2', '')).strip()
    # Check whether the first entry is empty
is_first_empty = not (allowed_types1 or required_prop_class_tag1 or required_prop_feature_tag1 or target_description1)
    # Check whether the second entry is empty
is_second_empty = not (allowed_types2 or required_prop_class_tag2 or required_prop_feature_tag2 or target_description2)
if is_first_empty and is_second_empty:
return "()"
first_item = f'(AllowedTypes={allowed_types1 or "0"},RequiredPropClassTag=(TagName="{required_prop_class_tag1}"),RequiredPropFeatureTag=(TagName="{required_prop_feature_tag1}"),Description="{target_description1}")'
if is_second_empty:
return f'({first_item})'
second_item = f'(AllowedTypes={allowed_types2 or "0"},RequiredPropClassTag=(TagName="{required_prop_class_tag2}"),RequiredPropFeatureTag=(TagName="{required_prop_feature_tag2}"),Description="{target_description2}")'
return f'({first_item},{second_item})'
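# Example of the string built above (the tag and description values are
# illustrative): a row with 허용타입 = 1, 필수클래스태그 = "Prop.Chair",
# an empty feature tag, 대상설명 = "Seat", and an empty second entry yields:
#   '((AllowedTypes=1,RequiredPropClassTag=(TagName="Prop.Chair"),RequiredPropFeatureTag=(TagName=""),Description="Seat"))'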
def set_file_writable(file_path):
"""파일의 읽기 전용 속성을 해제"""
if os.path.exists(file_path):
try:
import stat
current_permissions = os.stat(file_path).st_mode
os.chmod(file_path, current_permissions | stat.S_IWRITE)
print(f"파일 쓰기 권한 설정됨: {file_path}")
return True
except Exception as e:
print(f"파일 권한 변경 실패: {str(e)}")
return False
return True
def checkout_and_make_writable(asset_path):
"""에셋을 소스 컨트롤에서 checkout하고 쓰기 가능하게 만듦"""
try:
        # Check that the asset exists
        if not unreal.EditorAssetLibrary.does_asset_exist(asset_path):
            print(f"Asset does not exist: {asset_path}")
            return {'success': False, 'error': f"Asset does not exist: {asset_path}"}
        # Load the asset
        asset = unreal.EditorAssetLibrary.load_asset(asset_path)
        if not asset:
            print(f"Could not load the asset: {asset_path}")
            return {'success': False, 'error': f"Could not load the asset: {asset_path}"}
        # Check the source control state
try:
source_control = unreal.EditorUtilityLibrary.get_source_control_provider()
if source_control and source_control.is_enabled():
state = source_control.get_state(asset_path)
if state:
other_user = state.get_other_user()
if other_user:
                        error_msg = f"The asset is locked by {other_user}. Please contact {other_user}."
                        print(f"⚠️ {error_msg}")
                        return {'success': False, 'error': error_msg}
        except Exception as e:
            print(f"Error while checking the source control state: {str(e)}")
        # Try to check out the asset
try:
if not unreal.EditorAssetLibrary.checkout_loaded_asset(asset):
print(f"⚠️ 에셋 체크아웃 실패: {asset_path}")
return {'success': False, 'error': "에셋이 잠겨있습니다. 소스 컨트롤 상태를 확인해주세요."}
except Exception as e:
print(f"체크아웃 중 오류 발생: {str(e)}")
return {'success': False, 'error': "에셋이 잠겨있습니다. 소스 컨트롤 상태를 확인해주세요."}
print(f"✅ 에셋이 체크아웃됨: {asset_path}")
# 에셋의 실제 파일 경로 가져오기
package_path = unreal.EditorAssetLibrary.get_path_name_for_loaded_asset(asset)
if not package_path:
print(f"에셋 경로를 찾을 수 없음: {asset_path}")
return {'success': False, 'error': f"에셋 경로를 찾을 수 없습니다: {asset_path}"}
# 파일 쓰기 가능하게 만들기
if not set_file_writable(package_path):
print(f"파일을 쓰기 가능하게 만들 수 없음: {package_path}")
return {'success': False, 'error': f"파일을 쓰기 가능하게 만들 수 없습니다: {package_path}"}
print(f"✅ 에셋이 쓰기 가능하게 설정됨: {asset_path}")
return {'success': True}
except Exception as e:
print(f"에셋 설정 중 오류 발생: {str(e)}")
return {'success': False, 'error': str(e)}
def import_table(sheet_id=SHEET_ID, gid=GID):
try:
print(f"=== 메타 액션 임포트 시작 ===")
print("테이블 정보:")
print(f"- Sheet ID: {sheet_id}")
print(f"- GID: {gid}")
print(f"- 테이블 경로: {TABLE_PATH}")
data = get_sheet_data(sheet_id, gid)
if not data:
return {'success': False, 'error': '데이터를 가져올 수 없습니다.'}
# 데이터 테이블 생성 또는 로드
data_table = unreal.EditorAssetLibrary.load_asset(TABLE_PATH)
if not data_table:
data_table = unreal.EditorAssetLibrary.make_new_data_table_asset(TABLE_PATH, unreal.DataTable)
if not data_table:
print(f"데이터 테이블 생성 실패: {TABLE_PATH}")
return {'success': False, 'error': '데이터 테이블을 생성할 수 없습니다.'}
# 에셋 checkout
checkout_result = checkout_and_make_writable(TABLE_PATH)
if not checkout_result['success']:
return checkout_result
# 데이터 처리 및 저장
if process_and_save_data(data_table, data):
print(f"데이터 테이블 업데이트 성공 ({len(data)}행)")
return {'success': True, 'count': len(data)}
return {'success': False, 'error': '데이터 처리 중 오류가 발생했습니다.'}
except Exception as e:
print(f"메타 액션 임포트 중 오류 발생: {str(e)}")
import traceback
traceback.print_exc()
return {'success': False, 'error': str(e)}
def process_and_save_data(data_table, data):
try:
        # Create the CSV file
temp_dir = tempfile.gettempdir()
temp_csv_path = os.path.join(temp_dir, "temp_meta_action.csv")
fieldnames = ['Name', 'ActionKey', 'Type', 'Description', 'TargetDataList',
'bIsEnabled', 'bIsExposedToLibrary', 'bIsUseableByS2M']
with open(temp_csv_path, 'w', encoding='utf-8', newline='') as outfile:
writer = csv.DictWriter(outfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
action_key = str(row.get('행동키', '')).strip()
if not action_key:
continue
new_row = {
'Name': action_key,
'ActionKey': f'(TagName=ActionKey.{action_key})',
'Type': str(row.get('타입', '')).strip(),
'Description': str(row.get('설명', '')).strip(),
'TargetDataList': create_target_data_list(data, row),
'bIsEnabled': str(row.get('활성화', 'false')).upper(),
'bIsExposedToLibrary': str(row.get('유저사용', 'false')).upper(),
'bIsUseableByS2M': str(row.get('S2M사용', 'false')).upper()
}
writer.writerow(new_row)
        # Data table import settings
        struct_path = "/project/.CinevMetaActionData"
        row_struct = unreal.load_object(None, struct_path)
        if not row_struct:
            print(f"Could not find the struct: {struct_path}")
            return False
        # CSV import settings
factory = unreal.CSVImportFactory()
factory.automated_import_settings.import_row_struct = row_struct
        # Configure the import task
task = unreal.AssetImportTask()
task.filename = temp_csv_path
task.destination_path = os.path.dirname(TABLE_PATH)
task.destination_name = os.path.basename(TABLE_PATH).replace("/Game/", "")
task.replace_existing = True
task.automated = True
task.save = True
task.factory = factory
        # Run the data table import
asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
asset_tools.import_asset_tasks([task])
        # Delete the temporary file
        try:
            os.remove(temp_csv_path)
            print(f"Temporary file removed: {temp_csv_path}")
        except Exception as e:
            print(f"Failed to remove the temporary file: {e}")
        # Refresh the content browser
try:
content_browser = unreal.get_editor_subsystem(unreal.ContentBrowserSubsystem)
content_browser.refresh_folder(os.path.dirname(TABLE_PATH))
except Exception as e:
print(f"콘텐츠 브라우저 새로고침 실패: {e}")
return True
except Exception as e:
print(f"데이터 처리 중 오류 발생: {str(e)}")
return False
def import_meta_action(sheet_id, gid, table_path):
try:
print(f"=== 메타 액션 임포트 시작 ===")
print("테이블 정보:")
print(f"- Sheet ID: {sheet_id}")
print(f"- GID: {gid}")
print(f"- 테이블 경로: {table_path}")
data = get_sheet_data(sheet_id, gid)
if not data:
return False
        # Check out the asset
        if not checkout_and_make_writable(table_path).get('success', False):
            return False
        # Create or load the data table
data_table = unreal.EditorAssetLibrary.load_asset(table_path)
if not data_table:
data_table = unreal.EditorAssetLibrary.make_new_data_table_asset(table_path, unreal.DataTable)
if not data_table:
print(f"데이터 테이블 생성 실패: {table_path}")
return False
# 데이터 처리 및 저장
if process_and_save_data(data_table, data):
print(f"데이터 테이블 업데이트 성공 ({len(data)}행)")
return True
return False
except Exception as e:
print(f"메타 액션 임포트 중 오류 발생: {str(e)}")
return False
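# Usage sketch: once this package is importable in the editor, the table can be
# refreshed with the SHEET_ID/GID defaults defined above, e.g.
#
#   result = import_table()
#   if not result['success']:
#       unreal.log_error(result['error'])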
|
#Thanks to Mystfit https://github.com/project/-StableDiffusionTools/project/
import importlib.util
import unreal
import signal
import install_dependencies
#adding menus
menus = unreal.ToolMenus.get()
# Find the level editor's main menu; this should not fail,
# but when looking up a menu we're unsure about, an 'if not'
# check works as a null check.
main_menu = menus.find_menu("LevelEditor.MainMenu")
my_menu = main_menu.add_sub_menu("My.Menu", "Python", "My Menu", "Let's Dream")
for name in ["Dream", "Install"]:
e = unreal.ToolMenuEntry(
name = name,
type = unreal.MultiBlockType.MENU_ENTRY, # If you pass a type that is not supported Unreal will let you know,
)
e.set_label(name)
if name == "Dream":
e.set_string_command(
type=unreal.ToolMenuStringCommandType.PYTHON,
custom_type=name,
string="import unreal;unreal.EditorUtilitySubsystem().spawn_and_register_tab(unreal.EditorAssetLibrary.load_asset('/project/.dreamUI'))" #< !! This is where things get interesting
)
if name == "Install":
e.set_string_command(
type=unreal.ToolMenuStringCommandType.PYTHON,
custom_type=name,
string="install_dependencies.py" #< !! This is where things get interesting
)
my_menu.add_menu_entry("Items", e)
menus.refresh_all_widgets()
#------------------------------------------------------------------------------
# Replace print() command to fix Unreal flagging every Python print call as an error
print = unreal.log
# Redirect missing SIGKILL signal on windows to SIGTERM
signal.SIGKILL = signal.SIGTERM
def SD_dependencies_installed():
dependencies = install_dependencies.dependencies
installed = True
modules = [package_opts["module"] if "module" in package_opts else package_name for package_name, package_opts in dependencies.items()]
for module in modules:
print(f"Looking for module {module}")
try:
if not importlib.util.find_spec(module):
                raise ValueError()
except ValueError:
print("Missing Stable Diffusion dependency {0}. Please install or update the plugin's python dependencies".format(module))
installed = False
return installed
# Check if dependencies are installed correctly
dependencies_installed = SD_dependencies_installed()
print("Stable Diffusion dependencies are {0}available".format("" if dependencies_installed else "not "))
if dependencies_installed:
try:
import load_diffusers_bridge
except ImportError:
print("Skipping default Diffusers Bridge load until dependencies have been installed")
|
"""
This module shows some basic actor interactions
with a focus on processing the actor hierarchy
in our current 3D level
"""
import unreal
from recipebook.unreal_systems import (
EditorAssetLibrary,
EditorActorSubsystem,
UnrealEditorSubsystem
)
function_demo_class = None
def get_all_actors(include_possessables = True, include_spawnables = True):
"""
Get the list of Actors in the current Editor World / 3D Level
"""
actors = []
if include_possessables:
actors += EditorActorSubsystem.get_all_level_actors() or []
if include_spawnables:
actors += unreal.GameplayStatics.get_all_actors_with_tag(
UnrealEditorSubsystem.get_editor_world(),
"SequencerActor"
) or []
return actors
def get_root_actors():
"""Get all root actors (top-most in the 3D Level)"""
return [
actor
for actor in get_all_actors()
if not actor.get_attach_parent_actor()
and not actor.get_parent_actor()
]
def is_functions_demo_actor(actor):
"""
check if the given actor is a 'ue_functions_demo' blueprint actor
parameters:
actor: the actor to process
return:
True if the actor is an ue_functions_demo BP
"""
global function_demo_class
if not function_demo_class:
asset_path = "/project/"
if EditorAssetLibrary.does_asset_exist(asset_path):
function_demo_class = EditorAssetLibrary.load_asset(asset_path).generated_class()
else:
unreal.log_warning(f"functions_demo asset not found in its expected location: {asset_path}")
return
return unreal.MathLibrary.class_is_child_of(actor.get_class(), function_demo_class)
def get_scene_hierarchy():
"""
process the 3D level for basic actor hierarchy information
return:
the scene's actor hierarchy in a json compliant dict
"""
# get all top level actors
top_level_actors = get_root_actors()
# get the hierarchy for each top level actor
data = {
str(actor.get_path_name()): walk_actor(actor)
for actor in top_level_actors
}
return data
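# A minimal usage sketch: dump the hierarchy to JSON for inspection
# (kept commented out so importing this module has no side effects):
#
#   import json
#   unreal.log(json.dumps(get_scene_hierarchy(), indent=2))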
def walk_actor(actor):
"""
walk the given actor's hierarchy and convert it to a json compliant dict
parameters:
actor: the actor to process
return:
dict: the actor's hierarchy in a json compliant dict
"""
# Get the asset path
source_asset = get_asset_from_actor(actor)
asset_path = source_asset.get_outermost().get_path_name() if source_asset else ""
# separate the nested actors from the spawned actors:
spawned_actors = actor.get_all_child_actors()
nested_actors = [
child
for child in actor.get_attached_actors() # all actors under the current actor
if child not in spawned_actors # actors spawned by the current actor
]
if spawned_actors:
print(f"{actor.get_actor_label()} spawns the following actors:")
for child in spawned_actors:
print(f"\t> {child.get_actor_label()}")
# Recurse through any nested actors
children = {
str(child.get_path_name()): walk_actor(child)
for child in nested_actors
}
# Store the actor data
data = {
"display_name": str(actor.get_actor_label()),
"actor_class": str(actor.get_class().get_name()),
"asset_path": str(asset_path),
"transform": get_actor_root_transform(actor),
"children": children
}
# if we know the class we're interacting with we can use `call_method`
# to run functions declared in the Blueprint Graph
if is_functions_demo_actor(actor):
data["arbitrary_data"] = str(actor.call_method("get_arbitrary_data"))
data["prefixed_data"] = str(actor.call_method("add_prefix", ("my_input",)))
print(
f"Processed the following additional data on {data['display_name']}:\n\t"
f"arbitrary_data: {data['arbitrary_data']}\n\t"
f"prefixed_data : {data['prefixed_data']}"
)
# print out the component hierarchy of this actor
print(f"{actor.get_actor_label()} components:")
walk_component(actor.root_component, actor)
return data
def walk_component(component, owner=None, indent=2):
"""
walk the given component's hierarchy and print it
It's best to call this on an actor's root component
parameters:
component: the component to process
owner: the actor that owns this component,
if provided this will keep the results
local to the immediate actor
"""
if not component:
return
# validate the component's owner
if not owner:
owner = component.get_owner()
if component.get_owner() != owner:
return
print(f"{' '*indent}{component.get_name()} (component)")
# recurse through any immediate children
for child in component.get_children_components(False):
walk_component(child, owner , indent+2)
def get_actor_root_transform(actor):
"""
Get the actor's root component transform data as a dict
parameters:
actor: the actor to process
returns:
dict: the actor's root transform data as a dict
"""
# an actor's transforms is stored on its root component
root = actor.root_component
if not root:
return dict()
# break up the transform data to make it easier to pack into json
xform = root.get_relative_transform()
translate = xform.translation
rotate = xform.rotation.rotator()
scale = xform.scale3d
return {
"location": [float(v) for v in [translate.x, translate.y, translate.z]],
"rotate": [float(v) for v in [rotate.roll, rotate.pitch, rotate.yaw]],
"scale": [float(v) for v in [scale.x, scale.y, scale.z]],
"is_absolute": [root.absolute_location, root.absolute_rotation, root.absolute_scale]
}
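# Shape of the dict returned above (the values are illustrative):
#   {
#       "location": [0.0, 0.0, 0.0],
#       "rotate": [0.0, 0.0, 0.0],
#       "scale": [1.0, 1.0, 1.0],
#       "is_absolute": [False, False, False]
#   }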
def get_asset_from_actor(actor):
"""
Get the content browser asset path of the given actor,
support must be added for each asset type
"""
asset = None
# the source asset is usually stored on the root component
# and is generally unique per component class type,
# support will need to be added for each Actor class (joy)
if isinstance(actor.get_class(), unreal.BlueprintGeneratedClass):
asset = unreal.load_asset(actor.get_class().get_outer().get_path_name())
elif isinstance(actor, unreal.StaticMeshActor):
asset = actor.static_mesh_component.static_mesh
elif isinstance(actor, unreal.SkeletalMeshActor):
asset = actor.skeletal_mesh_component.skeletal_mesh_asset
elif isinstance(actor, unreal.NiagaraActor):
asset = actor.niagara_component.get_asset()
elif isinstance(actor, unreal.LevelInstance):
asset = actor.get_world_asset().get_outer().get_path_name()
else:
unreal.log_warning(
f"\n\tActor {actor.get_actor_label()} has an unknown "
f" or unsupported source asset ({actor.get_class()})"
f"\n\t\tEither the actor does not have a source asset"
f" in the Content Browser"
f"\n\t\tor get_asset_from_actor() does not yet support"
f" its class type"
)
return asset
def toggle_selection_tracking(active: bool = True):
"""
Enable the tracking of the user's selection
"""
# Track the actor selection changes in the 3D level
level_editor_subsystem = unreal.get_editor_subsystem(unreal.LevelEditorSubsystem)
selection_set = level_editor_subsystem.get_selection_set()
selection_set.on_selection_change.remove_callable(selection_tracker)
if active:
selection_set.on_selection_change.add_callable(selection_tracker)
def selection_tracker(selection_set):
"""
List the contents of the provided selection set object
parameters:
selection_set: the unreal.TypedElementSelectionSet selection set to query
"""
if selection_set.get_num_selected_elements():
print(f"The following objects are currently selected:")
for selected in selection_set.get_selected_objects():
print(f"\t{selected.get_path_name()}")
else:
print("no objects selected!")
|
import unreal
from unreal import MaterialInstanceConstant
# consolidate together Material Instances assets having the same name
# WARNING: will erase consolidated assets
# retrieves all assets from the directory and its sub directories
asset_subsys = unreal.get_editor_subsystem(unreal.EditorAssetSubsystem)
all_asset_names = asset_subsys.list_assets("/project/", True, False)
# loads all assets of the MaterialInstanceConstant class
material_assets = []
for asset_name in all_asset_names:
loaded_asset = asset_subsys.load_asset(asset_name)
if loaded_asset.__class__ == MaterialInstanceConstant:
material_assets.append(loaded_asset)
# regroup assets having identical names
asset_consolidation = {}
for i in range(0, len(material_assets)):
name = material_assets[i].get_name()
if not name in asset_consolidation:
asset_consolidation[name] = []
asset_consolidation[name].append(i)
# consolidate references of identical assets
for asset_name, assets_ids in asset_consolidation.items():
if len(assets_ids) < 2:
continue
asset_subsys.consolidate_assets(material_assets[assets_ids[0]], [material_assets[i] for i in assets_ids[1:]])
# Need to fixup redirectors after that, though it's not accessible from Python
# UAssetTools::FixupReferencers not exposed as of 4.26
#
# It has to be done from the Editor or a Commandlet
# https://docs.unrealengine.com/en-us/project/
|
import json
from enum import Enum, auto
import inspect
from typing import Callable, Union
from concurrent.futures import ThreadPoolExecutor, Future
from threading import Lock
import logging
import unreal
logger = logging.getLogger(__name__)
class FuncType(Enum):
STATIC_METHOD = auto()
CLASS_METHOD = auto()
LAMBDA = auto()
UNBOUND_METHOD = auto()
INSTANCE_METHOD = auto()
INSTANCE_METHOD_OF_CLASS = auto()
STATIC_FUNCTION = auto()
BUILTIN = auto()
UNKNOWN = auto()
def get_func_type(callback: callable, cls=None) -> FuncType:
if isinstance(callback, staticmethod):
return FuncType.STATIC_METHOD
if not callable(callback):
raise ValueError("callback must be a callable object")
if cls:
for _, obj in cls.__dict__.items():
if obj is callback:
if isinstance(obj, staticmethod):
return FuncType.STATIC_METHOD
elif isinstance(obj, classmethod):
return FuncType.CLASS_METHOD
break
elif isinstance(callback, staticmethod):
return FuncType.STATIC_METHOD
if hasattr(callback, "__name__") and callback.__name__ == "<lambda>":
return FuncType.LAMBDA
if inspect.ismethod(callback):
if callback.__self__ is None:
return FuncType.UNBOUND_METHOD
elif isinstance(callback.__self__, type):
return FuncType.CLASS_METHOD
else:
return FuncType.INSTANCE_METHOD
if inspect.isfunction(callback):
params_names = list(inspect.signature(callback).parameters.keys())
if params_names and params_names[0] == "self":
return FuncType.INSTANCE_METHOD_OF_CLASS
return FuncType.STATIC_FUNCTION
if inspect.isbuiltin(callback):
return FuncType.BUILTIN
return FuncType.UNKNOWN
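# Quick illustration of the classification above (commented out to keep this
# module import side-effect free; the sample class is hypothetical):
#
#   class _Sample:
#       @classmethod
#       def c(cls): ...
#       def m(self): ...
#
#   get_func_type(_Sample.c)       # -> FuncType.CLASS_METHOD (bound to the class)
#   get_func_type(_Sample().m)     # -> FuncType.INSTANCE_METHOD
#   get_func_type(_Sample.m)       # -> FuncType.INSTANCE_METHOD_OF_CLASS
#   get_func_type(lambda: None)    # -> FuncType.LAMBDA
#   get_func_type(len)             # -> FuncType.BUILTIN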
class ChameleonTaskExecutor:
"""
ChameleonTaskExecutor is a class for managing and executing tasks in parallel.
It uses a ThreadPoolExecutor to run tasks concurrently.
"""
def __init__(self, owner):
"""
Initialize the ChameleonTaskExecutor with the owner of the tasks.
"""
assert isinstance(owner.data, unreal.ChameleonData)
self.owner = owner
self.executor = ThreadPoolExecutor()
self.futures_dict = {}
self.lock = Lock()
@staticmethod
def _find_var_name_in_outer(target_var, by_type:bool=False)->str:
frames = inspect.getouterframes(inspect.currentframe())
top_frame = frames[-1]
instance_name_in_global = ""
for k, v in top_frame.frame.f_globals.items():
if by_type:
if isinstance(v, target_var):
# print(f"!! found: {k} @ frame: frame count: {len(frames)}")
instance_name_in_global = k
break
if type(v) == target_var:
# print(f"!! found: {k} @ frame: frame count: {len(frames)}")
instance_name_in_global = k
break
else:
if v == target_var:
# print(f"!! found: {k} @ frame: frame count: {len(frames)}")
instance_name_in_global = k
break
return instance_name_in_global
@staticmethod
def _number_of_param(callback)->int:
try:
if isinstance(callback, str):
                param_str = callback[callback.find("("): callback.find(")") + 1].strip()
                inner = param_str[1:-1].strip()
                return inner.count(",") + 1 if inner else 0  # number of comma-separated params
else:
sig = inspect.signature(callback)
param_count = len(sig.parameters)
return param_count
except Exception as e:
print(e)
return 0
@staticmethod
def _get_balanced_bracket_code(content, file_name, lineno):
def _is_brackets_balanced(content):
v = 0
for c in content:
if c == "(":
v += 1
elif c == ")":
v -= 1
return v == 0
if "(" in content and _is_brackets_balanced(content):
return content
try:
with open(file_name, 'r', encoding="utf-8") as f:
lines = f.readlines()
line = ""
for i in range(lineno-1, len(lines)):
line += lines[i].strip()
if "(" in line and _is_brackets_balanced(line):
return line
except Exception as e:
raise RuntimeError(f"Failed to process file {file_name} line {lineno} : {e}")
return None
@staticmethod
def get_cmd_str_from_callable(callback: Union[callable, str]) -> str:
"""Get the command string from a callable object. The command string is used to call the callable object"""
if isinstance(callback, str):
return callback
callback_type = get_func_type(callback)
if callback_type == FuncType.BUILTIN:
return "{}(%)".format(callback.__qualname__)
elif callback_type == FuncType.LAMBDA:
raise ValueError("Lambda function is not supported")
else:
frames = inspect.getouterframes(inspect.currentframe())
last_callable_frame_idx = -1
for i, frame in enumerate(frames):
for var_name, var_value in frame.frame.f_locals.items():
if callable(var_value) and hasattr(var_value, "__code__"):
if var_value.__code__ == callback.__code__:
last_callable_frame_idx = i
# The upper frame of the last callable frame is the frame that contains the callback,
# so we can get the code context of the callback from the upper frame
upper_frame = frames[last_callable_frame_idx + 1] if len(frames) > last_callable_frame_idx + 1 else None
code_context = "".join(upper_frame.code_context)
code_line = ChameleonTaskExecutor._get_balanced_bracket_code(code_context, upper_frame.filename, upper_frame.lineno)
callback_params = code_line[code_line.index("(") + 1: code_line.rfind(")")].split(",")
callback_param = ""
for param in callback_params:
if callback.__name__ in param:
callback_param = param if "=" not in param else param[param.index('=')+1:]
break
if callback_param:
# found
if callback_type == FuncType.INSTANCE_METHOD or callback_param.startswith("self."):
instance_name = ChameleonTaskExecutor._find_var_name_in_outer(upper_frame.frame.f_locals["self"])
cmd = f"{instance_name}.{callback_param[callback_param.index('.') + 1:]}(%)"
else:
cmd = f"{callback_param}(%)"
return cmd
return f"{callback.__qualname__}(%)"
def submit_task(self, task:Callable, args=None, kwargs=None, on_finish_callback: Union[Callable, str] = None)-> int:
"""
Submit a task to be executed. The task should be a callable object.
Args and kwargs are optional arguments to the task.
Callback is an optional function to be called when the task is done.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
future = self.executor.submit(task, *args, **kwargs)
assert future is not None, "future is None"
future_id = id(future)
with self.lock:
self.futures_dict[future_id] = future
cmd = ChameleonTaskExecutor.get_cmd_str_from_callable(on_finish_callback)
param_count = ChameleonTaskExecutor._number_of_param(on_finish_callback)
cmd = cmd.replace("%", str(future_id) if param_count else "")
def _func(_future):
unreal.PythonBPLib.exec_python_command(cmd, force_game_thread=True)
future.add_done_callback(_func)
unreal.log(f"submit_task callback cmd: {cmd}, param_count: {param_count}")
return future_id
def get_future(self, future_id)-> Future:
with self.lock:
return self.futures_dict.get(future_id, None)
def get_task_is_running(self, future_id)-> bool:
future = self.get_future(future_id)
if future is not None:
return future.running()
return False
def is_any_task_running(self):
for future_id in self.futures_dict.keys():
if self.get_task_is_running(future_id):
return True
return False
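# Minimal usage sketch (commented out; `tool` is a hypothetical TAPython
# Chameleon tool instance whose `.data` attribute is an unreal.ChameleonData):
#
#   executor = ChameleonTaskExecutor(owner=tool)
#   task_id = executor.submit_task(
#       task=lambda n: sum(range(n)),
#       args=[10_000_000],
#       on_finish_callback="tool.on_long_task_done(%)",  # '%' is replaced with the future id
#   )
#   # later, e.g. from a UI callback:
#   if not executor.get_task_is_running(task_id):
#       result = executor.get_future(task_id).result()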
|
import unreal
unreal.log("Hello World From A Script")
|
import unreal
# Create all assets and objects we'll use
lvs = unreal.VariantManagerLibrary.create_level_variant_sets_asset("LVS", "/Game/")
lvs_actor = unreal.VariantManagerLibrary.create_level_variant_sets_actor(lvs)
if lvs is None or lvs_actor is None:
print ("Failed to spawn either the LevelVariantSets asset or the LevelVariantSetsActor!")
quit()
# Create a variant set and add it to lvs
var_set1 = unreal.VariantSet()
var_set1.set_display_text("My VariantSet")
lvs.add_variant_set(var_set1)
# Create a variant and add it to var_set1
var1 = unreal.Variant()
var1.set_display_text("Variant 1")
var_set1.add_variant(var1)
# Create a test actor and add it to var1. The test actor has almost all possible types of capturable properties
location = unreal.Vector()
rotation = unreal.Rotator()
test_actor = unreal.EditorLevelLibrary.spawn_actor_from_class(unreal.VariantManagerTestActor, location, rotation)
var1.add_actor_binding(test_actor)
capturable_props = unreal.VariantManagerLibrary.get_capturable_properties(test_actor)
captured_props = []
print ("Capturable properties for actor '" + test_actor.get_actor_label() + "':")
for prop in capturable_props:
print ("\t" + prop)
# All test properties are named like 'Captured____Property'
# The check here avoids capturing generic Actor properties like 'Can be Damaged'
if str(prop).startswith('Captured') and str(prop).endswith('Property'):
new_prop = var1.capture_property(test_actor, prop)
captured_props.append(new_prop)
for prop in captured_props:
type_str = prop.get_property_type_string()
# Set a value for a property depending on its type
if type_str == "bool":
prop.set_value_bool(True)
elif type_str == "int":
prop.set_value_int(2)
elif type_str == "float":
prop.set_value_float(2.0)
elif type_str == "object":
cube = unreal.EditorAssetLibrary.load_asset("StaticMesh'/project/.Cube'")
prop.set_value_object(cube)
elif type_str == "strint":
prop.set_value_string("new string")
elif type_str == "rotator":
prop.set_value_rotator(unreal.Rotator(11, 12, 13))
elif type_str == "color":
prop.set_value_color(unreal.Color(21, 22, 23, 24))
elif type_str == "linear_color":
prop.set_value_linear_color(unreal.LinearColor(0.31, 0.32, 0.33, 0.34))
elif type_str == "vector":
prop.set_value_vector(unreal.Vector(41, 42, 43))
elif type_str == "quat":
prop.set_value_quat(unreal.Quat(0.51, 0.52, 0.53, 0.54))
elif type_str == "vector4":
prop.set_value_vector4(unreal.Vector4(6.1, 6.2, 6.3, 6.4))
elif type_str == "Vector2D":
prop.set_value_vector2d(unreal.Vector2D(7.1, 7.2))
elif type_str == "int_Point":
prop.set_value_int_point(unreal.IntPoint(81, 82))
# Easier to print using getattr
for prop in captured_props:
type_str = prop.get_property_type_string()
print (getattr(prop, "get_value_" + type_str)())
|
import unreal
# Pilot the first selected level actor (assumes at least one actor is currently selected)
actor: unreal.Actor = unreal.EditorLevelLibrary.get_selected_level_actors()[0]
unreal.EditorLevelLibrary.pilot_level_actor(actor)
|
# coding: utf-8
import unreal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-vrm")
parser.add_argument("-rig")
parser.add_argument("-meta")
args = parser.parse_args()
#print(args.vrm)
humanoidBoneList = [
"hips",
"leftUpperLeg",
"rightUpperLeg",
"leftLowerLeg",
"rightLowerLeg",
"leftFoot",
"rightFoot",
"spine",
"chest",
"upperChest",
"neck",
"head",
"leftShoulder",
"rightShoulder",
"leftUpperArm",
"rightUpperArm",
"leftLowerArm",
"rightLowerArm",
"leftHand",
"rightHand",
"leftToes",
"rightToes",
"leftEye",
"rightEye",
"jaw",
"leftThumbProximal",
"leftThumbIntermediate",
"leftThumbDistal",
"leftIndexProximal",
"leftIndexIntermediate",
"leftIndexDistal",
"leftMiddleProximal",
"leftMiddleIntermediate",
"leftMiddleDistal",
"leftRingProximal",
"leftRingIntermediate",
"leftRingDistal",
"leftLittleProximal",
"leftLittleIntermediate",
"leftLittleDistal",
"rightThumbProximal",
"rightThumbIntermediate",
"rightThumbDistal",
"rightIndexProximal",
"rightIndexIntermediate",
"rightIndexDistal",
"rightMiddleProximal",
"rightMiddleIntermediate",
"rightMiddleDistal",
"rightRingProximal",
"rightRingIntermediate",
"rightRingDistal",
"rightLittleProximal",
"rightLittleIntermediate",
"rightLittleDistal",
]
for i in range(len(humanoidBoneList)):
humanoidBoneList[i] = humanoidBoneList[i].lower()
######
rigs = unreal.ControlRigBlueprint.get_currently_open_rig_blueprints()
#print(rigs)
for r in rigs:
s:str = r.get_path_name()
ss:str = args.rig
if (s.find(ss) < 0):
print("no rig")
else:
rig = r
print(unreal.SystemLibrary.get_engine_version())
if (unreal.SystemLibrary.get_engine_version()[0] == '5'):
c = rig.get_controller()#rig.controller
else:
c = rig.controller
g = c.get_graph()
n = g.get_nodes()
print(n)
#c.add_branch_node()
#c.add_array_pin()
a:unreal.RigUnit_CollectionItems = unreal.RigUnit_CollectionItems()
# print(a)
# Add array nodes
collectionItem_forControl:unreal.RigVMStructNode = None
collectionItem_forBone:unreal.RigVMStructNode = None
for node in n:
if (node.get_node_title() == 'Items' or node.get_node_title() == 'Collection from Items'):
#print(node.get_node_title())
#node = unreal.RigUnit_CollectionItems.cast(node)
pin = node.find_pin('Items')
print(pin.get_array_size())
print(pin.get_default_value())
if (pin.get_array_size() < 40):
continue
if 'Type=Bone' in pin.get_default_value():
collectionItem_forBone= node
if 'Type=Control' in pin.get_default_value():
collectionItem_forControl = node
#nn = unreal.EditorFilterLibrary.by_class(n,unreal.RigUnit_CollectionItems.static_class())
## Fetch the meta object
reg = unreal.AssetRegistryHelpers.get_asset_registry();
a = reg.get_all_assets();
if (args.meta):
for aa in a:
if (aa.get_editor_property("object_path") == args.meta):
v:unreal.VrmMetaObject = aa
vv = aa.get_asset()
if (vv == None):
for aa in a:
if (aa.get_editor_property("object_path") == args.vrm):
v:unreal.VrmAssetListObject = aa
vv = v.get_asset().vrm_meta_object
#print(vv)
meta = vv
# controller array
if (collectionItem_forControl == None):
collectionItem_forControl = c.add_struct_node(unreal.RigUnit_CollectionItems.static_struct(), method_name='Execute')
items_forControl = collectionItem_forControl.find_pin('Items')
c.clear_array_pin(items_forControl.get_pin_path())
# bone array
if (collectionItem_forBone == None):
collectionItem_forBone = c.add_struct_node(unreal.RigUnit_CollectionItems.static_struct(), method_name='Execute')
items_forBone = collectionItem_forBone.find_pin('Items')
c.clear_array_pin(items_forBone.get_pin_path())
## h_mod
rigs = unreal.ControlRigBlueprint.get_currently_open_rig_blueprints()
rig = rigs[0]
print(items_forControl)
print(items_forBone)
humanoidBoneTable = {"dummy" : "dummy"}
humanoidBoneTable.clear()
for h in meta.humanoid_bone_table:
bone_h = "{}".format(h).lower()
bone_m = "{}".format(meta.humanoid_bone_table[h]).lower()
try:
i = list(humanoidBoneTable.values()).index(bone_m)
except:
i = -1
if (bone_h!="" and bone_m!="" and i==-1):
humanoidBoneTable[bone_h] = bone_m
for bone_h in humanoidBoneList:
bone_m = humanoidBoneTable.get(bone_h, None)
if bone_m == None:
continue
#for bone_h in meta.humanoid_bone_table:
# bone_m = meta.humanoid_bone_table[bone_h]
# try:
# i = humanoidBoneList.index(bone_h.lower())
# except:
# i = -1
# if (i >= 0):
if (True):
tmp = '(Type=Bone,Name='
#tmp += "{}".format(bone_m).lower()
tmp += bone_m
tmp += ')'
c.add_array_pin(items_forBone.get_pin_path(), default_value=tmp)
#print(bone_m)
tmp = '(Type=Control,Name='
#tmp += "{}".format(bone_h).lower() + '_c'
tmp += bone_h + '_c'
tmp += ')'
#print(c)
c.add_array_pin(items_forControl.get_pin_path(), default_value=tmp)
#print(bone_h)
#for e in h_mod.get_elements():
# if (e.type == unreal.RigElementType.CONTROL):
# tmp = '(Type=Control,Name='
# tmp += "{}".format(e.name)
# tmp += ')'
# c.add_array_pin(items_forControl.get_pin_path(), default_value=tmp)
# print(e.name)
# if (e.type == unreal.RigElementType.BONE):
# tmp = '(Type=Bone,Name='
# tmp += "{}".format(e.name)
# tmp += ')'
# c.add_array_pin(items_forBone.get_pin_path(), default_value=tmp)
# print(e.name)
#print(i.get_all_pins_recursively())
#ii:unreal.RigUnit_CollectionItems = n[1]
#pp = ii.get_editor_property('Items')
#print(pp)
#print(collectionItem.get_all_pins_recursively()[0])
#i.get_editor_property("Items")
#c.add_array_pin("Execute")
# Grow the array
#i.get_all_pins_recursively()[0].get_pin_path()
#c.add_array_pin(i.get_all_pins_recursively()[0].get_pin_path(), default_value='(Type=Bone,Name=Global)')
#rig = rigs[10]
|
import unreal
# Define the path where you want to create the Blueprint
folder_path = "/project/" # Change this to your desired folder path
blueprint_name = "MyNewActorBlueprint"
component_class_path = "/project/.GameItem" # The path to your custom Actor component
# Ensure the folder exists
unreal.EditorAssetLibrary.make_directory(folder_path)
# Define the Blueprint class type
blueprint_factory = unreal.BlueprintFactory()
blueprint_factory.set_editor_property("ParentClass", unreal.Actor)
# Create the Blueprint
blueprint_asset = unreal.AssetToolsHelpers.get_asset_tools().create_asset(
blueprint_name,
folder_path,
None,
blueprint_factory
)
# Find the custom component class
component_class = unreal.load_class(None, component_class_path)  # load_class resolves a class from an object path
# Ensure the component class is valid
if not component_class:
print(f"Component class {component_class_path} not found.")
else:
# Open the Blueprint Editor
    blueprint_editor = unreal.get_editor_subsystem(unreal.AssetEditorSubsystem).open_editor_for_assets([blueprint_asset])
    # Get the Blueprint's Simple Construction Script (SCS).
    # Note: direct SCS access may not be exposed to Python in every engine version; the
    # SubobjectDataSubsystem is the generally supported route for adding components from scripts.
scs = blueprint_asset.simple_construction_script
# Create a new SCS node for the component
new_node = scs.create_node(component_class, component_class_path.split('.')[-1])
# Add the new node to the SCS root nodes
scs.add_node(new_node)
# Mark the Blueprint as modified and save it
blueprint_asset.mark_package_dirty()
unreal.EditorAssetLibrary.save_loaded_asset(blueprint_asset)
print(f"Blueprint {blueprint_name} with component {component_class_path.split('.')[-1]} created successfully in {folder_path}")
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" An example script that uses the API to instantiate an HDA and then
set 2 inputs: a geometry input (a cube) and a curve input (a helix). The
inputs are set during post instantiation (before the first cook). After the
first cook and output creation (post processing) the input structure is fetched
and logged.
"""
import math
import unreal
_g_wrapper = None
def get_test_hda_path():
return '/project/.copy_to_curve_1_0'
def get_test_hda():
return unreal.load_object(None, get_test_hda_path())
def get_geo_asset_path():
return '/project/.Cube'
def get_geo_asset():
return unreal.load_object(None, get_geo_asset_path())
def configure_inputs(in_wrapper):
print('configure_inputs')
# Unbind from the delegate
in_wrapper.on_post_instantiation_delegate.remove_callable(configure_inputs)
# Create a geo input
geo_input = in_wrapper.create_empty_input(unreal.HoudiniPublicAPIGeoInput)
# Set the input objects/assets for this input
geo_object = get_geo_asset()
if not geo_input.set_input_objects((geo_object, )):
# If any errors occurred, get the last error message
print('Error on geo_input: {0}'.format(geo_input.get_last_error_message()))
# copy the input data to the HDA as node input 0
in_wrapper.set_input_at_index(0, geo_input)
# We can now discard the API input object
geo_input = None
# Create a curve input
curve_input = in_wrapper.create_empty_input(unreal.HoudiniPublicAPICurveInput)
# Create a curve wrapper/helper
curve_object = unreal.HoudiniPublicAPICurveInputObject(curve_input)
# Make it a Nurbs curve
curve_object.set_curve_type(unreal.HoudiniPublicAPICurveType.NURBS)
# Set the points of the curve, for this example we create a helix
# consisting of 100 points
curve_points = []
for i in range(100):
t = i / 20.0 * math.pi * 2.0
x = 100.0 * math.cos(t)
y = 100.0 * math.sin(t)
z = i
curve_points.append(unreal.Transform([x, y, z], [0, 0, 0], [1, 1, 1]))
curve_object.set_curve_points(curve_points)
# Error handling/message example: try to set geo_object on curve input
if not curve_input.set_input_objects((geo_object, )):
print('Error (example) while setting \'{0}\' on curve input: {1}'.format(
geo_object.get_name(), curve_input.get_last_error_message()
))
# Set the curve wrapper as an input object
curve_input.set_input_objects((curve_object, ))
# Copy the input data to the HDA as node input 1
in_wrapper.set_input_at_index(1, curve_input)
# We can now discard the API input object
curve_input = None
# Check for errors on the wrapper
last_error = in_wrapper.get_last_error_message()
if last_error:
print('Error on wrapper during input configuration: {0}'.format(last_error))
def print_api_input(in_input):
print('\t\tInput type: {0}'.format(in_input.__class__))
print('\t\tbKeepWorldTransform: {0}'.format(in_input.keep_world_transform))
print('\t\tbImportAsReference: {0}'.format(in_input.import_as_reference))
if isinstance(in_input, unreal.HoudiniPublicAPIGeoInput):
print('\t\tbPackBeforeMerge: {0}'.format(in_input.pack_before_merge))
print('\t\tbExportLODs: {0}'.format(in_input.export_lo_ds))
print('\t\tbExportSockets: {0}'.format(in_input.export_sockets))
print('\t\tbExportColliders: {0}'.format(in_input.export_colliders))
elif isinstance(in_input, unreal.HoudiniPublicAPICurveInput):
print('\t\tbCookOnCurveChanged: {0}'.format(in_input.cook_on_curve_changed))
print('\t\tbAddRotAndScaleAttributesOnCurves: {0}'.format(in_input.add_rot_and_scale_attributes_on_curves))
input_objects = in_input.get_input_objects()
if not input_objects:
print('\t\tEmpty input!')
else:
print('\t\tNumber of objects in input: {0}'.format(len(input_objects)))
for idx, input_object in enumerate(input_objects):
print('\t\t\tInput object #{0}: {1}'.format(idx, input_object))
if isinstance(input_object, unreal.HoudiniPublicAPICurveInputObject):
print('\t\t\tbClosed: {0}'.format(input_object.is_closed()))
print('\t\t\tCurveMethod: {0}'.format(input_object.get_curve_method()))
print('\t\t\tCurveType: {0}'.format(input_object.get_curve_type()))
print('\t\t\tReversed: {0}'.format(input_object.is_reversed()))
print('\t\t\tCurvePoints: {0}'.format(input_object.get_curve_points()))
def print_inputs(in_wrapper):
print('print_inputs')
# Unbind from the delegate
in_wrapper.on_post_processing_delegate.remove_callable(print_inputs)
# Fetch inputs, iterate over it and log
node_inputs = in_wrapper.get_inputs_at_indices()
parm_inputs = in_wrapper.get_input_parameters()
if not node_inputs:
print('No node inputs found!')
else:
print('Number of node inputs: {0}'.format(len(node_inputs)))
for input_index, input_wrapper in node_inputs.items():
print('\tInput index: {0}'.format(input_index))
print_api_input(input_wrapper)
if not parm_inputs:
print('No parameter inputs found!')
else:
print('Number of parameter inputs: {0}'.format(len(parm_inputs)))
for parm_name, input_wrapper in parm_inputs.items():
print('\tInput parameter name: {0}'.format(parm_name))
print_api_input(input_wrapper)
def run():
# get the API singleton
api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
global _g_wrapper
# instantiate an asset with auto-cook enabled
_g_wrapper = api.instantiate_asset(get_test_hda(), unreal.Transform())
# Configure inputs on_post_instantiation, after instantiation, but before first cook
_g_wrapper.on_post_instantiation_delegate.add_callable(configure_inputs)
# Print the input state after the cook and output creation.
_g_wrapper.on_post_processing_delegate.add_callable(print_inputs)
if __name__ == '__main__':
run()
|
import unreal
import sys
sys.path.append("/project/-packages")
from PySide import QtCore, QtGui, QtUiTools
editor_level_lib = unreal.EditorLevelLibrary()
class SimpleGUI(QtGui.QWidget):
def __init__(self, parent=None):
super(SimpleGUI, self).__init__(parent)
#load the created ui widget
self.widget = QtUiTools.QUiLoader().load("/project/.ui")
# attach the widget to the "self" GUI
self.widget.setParent(self)
# set the UI geometry (if UI is not centered/visible)
self.widget.setGeometry(0, 0, self.widget.width(), self.widget.height())
self.text_l = self.widget.findChild(QtGui.QLineEdit, "TextBox_L")
self.text_r = self.widget.findChild(QtGui.QLineEdit, "TextBox_R")
self.checkbox = self.widget.findChild(QtGui.QCheckBox, "CheckBox")
#find and assign slider
self.slider = self.widget.findChild(QtGui.QSlider, "horizontalSlider")
self.slider.sliderMoved.connect(self.on_slide)
# find buttons and set up handlers
self.btn_ok = self.widget.findChild(QtGui.QPushButton, "okButton")
self.btn_ok.clicked.connect(self.ok_clicked)
self.btn_cancel = self.widget.findChild(QtGui.QPushButton, "cancelButton")
self.btn_cancel.clicked.connect(self.cancel_clicked)
# triggered on clicked of okButton
def ok_clicked(self):
text_l = self.text_l.text()
text_r = self.text_r.text()
is_checked = self.checkbox.isChecked()
unreal.log("Text Left Value: {}".format(text_l))
unreal.log("Text Right Value: {}".format(text_r))
unreal.log("CheckBox Value: {}".format(is_checked))
def cancel_clicked(self):
unreal.log("Canceled")
self.close()
def on_slide(self):
slider_value = self.slider.value()
# move the selected actor according to the slider value
selected_actors = editor_level_lib.get_selected_level_actors()
if len(selected_actors) > 0:
actor = selected_actors[0]
            # get the old transform, change the Y-axis value and write it back
new_transform = actor.get_actor_transform()
new_transform.translation.y = slider_value
actor.set_actor_transform(new_transform, True, True)
# only create an instance of the QApplication when one is not already running
app = None
if not QtGui.QApplication.instance():
app = QtGui.QApplication(sys.argv)
# start the GUI
main_window = SimpleGUI()
main_window.show()
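# Optional sketch: when embedding Qt in the editor like this, the Qt event loop is not pumped by
# Unreal automatically, so one common workaround is to process pending Qt events from a Slate tick
# callback. QApplication.processEvents() is standard Qt API; the function and handle names below
# are arbitrary.
def _process_qt_events(delta_seconds):
    QtGui.QApplication.processEvents()
qt_tick_handle = unreal.register_slate_post_tick_callback(_process_qt_events)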
|
import unreal
def filter_out_skeletal_mesh() -> "list[unreal.SkeletalMesh]":
skeletal_meshes = []
for asset in unreal.EditorUtilityLibrary.get_selected_assets():
if isinstance(asset, unreal.SkeletalMesh):
skeletal_meshes.append(asset)
else:
unreal.EditorDialog.show_message(
title="错误",
message=f"选中的资产中只能包含骨骼网格体",
message_type=unreal.AppMsgType.OK
)
exit()
return skeletal_meshes
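# Usage sketch (added for illustration): log the name of each selected Skeletal Mesh. This assumes
# the script runs in the editor with assets selected in the Content Browser.
for skeletal_mesh in filter_out_skeletal_mesh():
    unreal.log("Selected Skeletal Mesh: {}".format(skeletal_mesh.get_name()))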
|
import unreal
blendSpace_dir = input()
#blendSpace_dir = "/project/"
bs_lists = unreal.EditorAssetLibrary.list_assets(blendSpace_dir)
bs_assets_list = []
for i in bs_lists:
bs_assets_list.append ( unreal.EditorAssetLibrary.load_asset(i) )
for i in bs_assets_list:
    num_sample = i.get_editor_property("sample_data")
    num_count = len(num_sample)
    if num_count == 0:
        # Report Blend Spaces that have no samples, by path rather than just printing 0
        print("{} has no samples".format(i.get_path_name()))
|
import unreal
import time
asset_path = "/project/"
all_actors = unreal.EditorLevelLibrary.get_all_level_actors()
all_assets = unreal.EditorAssetLibrary.list_assets(asset_path)
all_assets_loaded = [unreal.EditorAssetLibrary.load_asset(a) for a in all_assets]
texture_cube_assets = unreal.EditorFilterLibrary.by_class(all_assets_loaded, unreal.TextureCube)
envIndex = -1
tickhandle = None
def startGame():
unreal.EditorLevelLibrary.editor_play_simulate()
def endGame():
unreal.SystemLibrary.execute_console_command(unreal.EditorLevelLibrary.get_game_world(), "Exit")
def exchangeEnvironment(index):
global all_actors
global texture_cube_assets
if not len(texture_cube_assets) > 0:
unreal.log_error("No cubemap textures found!")
else:
for actor in all_actors:
            if "light_HDRIBackdrop" in actor.get_name(): # get the HDRIBackdrop actor in the scene
actor.set_editor_property("Cubemap", texture_cube_assets[index])
unreal.log_warning("New cubemap: {}".format(actor.get_editor_property("Cubemap")))
#unreal.EditorLevelLibrary.save_current_level()
def renderAllEnvironments(deltaTime):
global envIndex
global texture_cube_assets
# check if game has been quit: no game world = no running game
# (could not find a better solution in the API)
if(unreal.EditorLevelLibrary.get_game_world() == None):
envIndex += 1
if(envIndex < len(texture_cube_assets) and envIndex >= 0):
exchangeEnvironment(envIndex)
startGame()
else:
unreal.unregister_slate_pre_tick_callback(tickhandle)
else:
unreal.log_warning("game still running.")
tickhandle = unreal.register_slate_pre_tick_callback(renderAllEnvironments)
|
# Copyright Epic Games, Inc. All Rights Reserved.
""" Test (and demo) UE Python stub type hinting/syntax highlighting.
This module checks if Python stub type hinting works properly. The file can
be executed, but it is also meant to be loaded in a third party Python editor
that supports type checking (VSCode/PyCharm) and syntax highlighting so that a
programmer can visually check if the hinted types and the syntax highlighting are
working as expected.
WARNING: Ensure you select 'Type Checker' type hinting mode in the Editor Preferences
         to generate an accurate typing. The 'Auto-Completion' mode omits type
         coercion, which will raise several type check warnings in this file.
"""
import unreal
import typing
import random
def local_print(arg: typing.Any) -> None:
""" Useful for debugging, but mostly to remove warnings about unused variables."""
print(arg)
def take_sequence_types(seq_obj: typing.MutableSequence[typing.Any]) -> None:
""" Test if unreal.Array and unreal.FixedArray can be passed to API expecting a typing.Sequence type. """
local_print(seq_obj)
def take_iterable_types(iter_obj: typing.Iterable[typing.Any]) -> None:
""" Test if containers can be passed to API expecting an iterable type. """
local_print(iter_obj)
def take_set_types(set_obj: typing.MutableSet[typing.Any]) -> None:
""" Test if unreal.Set can be passed to API expecting a typing.Set type. """
local_print(set_obj)
def take_mapping_types(mapping_obj: typing.MutableMapping[typing.Any, typing.Any]) -> None:
""" Test if unreal.Map can be passed to API expecting a typing.Mapping type. """
local_print(mapping_obj)
class DelegateCallable:
""" Used to verify if a callable method is properly added/project/ from a multicast delegate. """
def __init__(self) -> None:
self.callback_count = 0
def delegate_callback(self, int_param: int) -> int:
self.callback_count += 1
local_print(int_param)
return self.callback_count
def multicast_delegate_callback(self, str_param: str) -> None:
self.callback_count += 1
local_print(str_param)
def test_name_wrapper() -> None:
from_default: unreal.Name = unreal.Name()
local_print(from_default)
from_another: unreal.Name = unreal.Name.cast(from_default)
local_print(from_another)
from_copy: unreal.Name = unreal.Name(from_default)
local_print(from_copy)
from_prop: object = unreal.PyTestStruct().get_editor_property("name")
local_print(unreal.Name.cast(from_prop))
from_str: unreal.Name = unreal.Name("Hi")
valid: bool = from_str.is_valid()
local_print(valid)
from_str_cast: unreal.Name = unreal.Name.cast("None")
none_value: bool = from_str_cast.is_none()
local_print(none_value)
def test_text_wrapper() -> None:
from_default: unreal.Text = unreal.Text()
local_print(from_default)
from_another: unreal.Text = unreal.Text.cast(from_default)
local_print(from_another)
from_copy: unreal.Text = unreal.Text(from_default)
local_print(from_copy)
from_prop: object = unreal.PyTestStruct().get_editor_property("text")
local_print(unreal.Text.cast(from_prop))
from_str: unreal.Text = unreal.Text("Hi")
local_print(from_str)
from_str_cast: unreal.Text = unreal.Text.cast("Some Text")
local_print(from_str_cast)
from_float: unreal.Text = unreal.Text.as_number(10.3)
local_print(from_float)
from_int: unreal.Text = unreal.Text.as_number(100)
local_print(from_int)
from_float_pct: unreal.Text = unreal.Text.as_percent(0.10) # 10%
local_print(from_float_pct)
from_int_pct: unreal.Text = unreal.Text.as_percent(1) # 100%
local_print(from_int_pct)
from_currency: unreal.Text = unreal.Text.as_currency(650, "US") # $6.50
local_print(from_currency)
empty: bool = from_str.is_empty() or from_str.is_empty_or_whitespace()
local_print(empty)
transient: bool = from_str.is_transient()
local_print(transient)
culture_invariant: bool = from_str.is_culture_invariant()
local_print(culture_invariant)
from_string_table: bool = from_str.is_from_string_table()
local_print(from_string_table)
lower_str: unreal.Text = from_str.to_lower()
local_print(lower_str)
upper_str: unreal.Text = from_str.to_upper()
local_print(upper_str)
fmt_seq_args: unreal.Text = unreal.Text("Hello {0}! I'm {1} and I'm {2} years old")
formatted_seq_args: unreal.Text = fmt_seq_args.format("Everybody", "Joe", 44)
local_print(formatted_seq_args)
fmt_mapping_args: unreal.Text = unreal.Text("Hello {a1}! I'm {a2} and I'm {a3} years old")
formatted_mapping_args: unreal.Text = fmt_mapping_args.format({"a1": "Everybody", "a2": "Joe", "a3": 44})
local_print(formatted_mapping_args)
# NOTE: This version works, but the argument names are dynamic, so type checker issue a warning.
fmt_named_args: unreal.Text = unreal.Text("Hello {a1}! I'm {a2} and I'm {a3} years old")
formatted_named_args: unreal.Text = fmt_named_args.format(a1="Everybody", a2="Joe", a3=44) # type: ignore
local_print(formatted_named_args)
def test_delegate_wrapper() -> None:
# Defines a callback that match FPyTestDelegate signature. (Need to check the C++ code for the signature)
def callback(int_param: int) -> int:
return int_param * 2
from_default: unreal.PyTestDelegate = unreal.PyTestDelegate()
local_print(from_default)
from_another: unreal.PyTestDelegate = unreal.PyTestDelegate.cast(from_default)
local_print(from_another)
from_copy: unreal.PyTestDelegate = from_default.copy()
local_print(from_copy)
from_prop: object = unreal.PyTestObject().get_editor_property("delegate")
local_print(unreal.PyTestDelegate.cast(from_prop))
# is_bound(), bind_delegate(), bind_function(), bind_callable(), unbind(), execute() and execute_if_bound().
o = unreal.PyTestObject()
from_default.bind_function(o, "delegate_property_callback") # PyTestObject has a UFUNCTION named DelegatePropertyCallback.
from_default.bind_callable(callback) # Replace the binding.
from_copy.bind_delegate(from_default)
if from_default.is_bound():
value: int = from_default.execute(33) # This delegate takes an int and returns an int.
local_print(value)
from_default.unbind()
if from_copy.is_bound():
value: int = from_copy.execute_if_bound(44) # This delegate takes an int and returns an int.
local_print(value)
from_copy.unbind()
def test_multicast_delegate_wrapper() -> None:
# Defines a callback that match FPyTestMulticastDelegate signature. (Need to check the C++ code for the signature)
def callback(str_param: str) -> None:
local_print(str_param)
from_default: unreal.PyTestMulticastDelegate = unreal.PyTestMulticastDelegate()
local_print(from_default)
from_another: unreal.PyTestMulticastDelegate = unreal.PyTestMulticastDelegate.cast(from_default)
local_print(from_another)
from_copy: unreal.PyTestMulticastDelegate = from_default.copy()
local_print(from_copy)
from_prop: object = unreal.PyTestObject().get_editor_property("multicast_delegate")
local_print(unreal.PyTestMulticastDelegate.cast(from_prop))
bound: bool = from_default.is_bound()
local_print(bound)
o = unreal.PyTestObject()
from_default.add_function(o, "multicast_delegate_property_callback")
from_default.remove_function(o, "multicast_delegate_property_callback")
from_default.add_function_unique(o, "multicast_delegate_property_callback")
func_in: bool = from_default.contains_function(o, "multicast_delegate_property_callback")
from_default.remove_object(o)
from_default.add_callable(callback)
from_default.remove_callable(callback)
from_default.add_callable_unique(callback)
    callable_in: bool = from_default.contains_callable(callback)
from_default.remove_callable(callback)
from_default.clear()
bound: bool = from_default.is_bound()
if func_in or callable_in or bound:
from_default.broadcast("hi")
def test_field_type_wrapper() -> None:
from_default: unreal.FieldPath = unreal.FieldPath()
local_print(from_default)
from_another: unreal.FieldPath = unreal.FieldPath(from_default)
local_print(from_another)
from_copy: unreal.FieldPath = from_default.copy()
local_print(from_copy)
from_prop: object = unreal.PyTestObject().get_editor_property("field_path")
local_print(unreal.FieldPath.cast(from_prop))
from_str: unreal.FieldPath = unreal.FieldPath("some_path")
local_print(from_str)
from_str_cast: unreal.FieldPath = unreal.FieldPath.cast("some_path")
local_print(from_str_cast)
valid: bool = from_str.is_valid()
local_print(valid)
def test_enum_wrapper() -> None:
from_value: unreal.PyTestEnum = unreal.PyTestEnum.ONE
local_print(from_value)
from_another: unreal.PyTestEnum = unreal.PyTestEnum.cast(from_value)
local_print(from_another)
from_prop: object = unreal.PyTestObject().get_editor_property("enum")
local_print(unreal.PyTestEnum.cast(from_prop))
static_enum: unreal.Enum = unreal.PyTestEnum.static_enum()
local_print(static_enum)
name: unreal.Text = from_value.get_display_name()
local_print(name)
def test_struct_wrapper() -> None:
""" Ensures that the struct wrapper is correctly hinted. """
from_default: unreal.PyTestStruct = unreal.PyTestStruct()
local_print(from_default)
from_another: unreal.PyTestStruct = unreal.PyTestStruct.cast(from_default)
local_print(from_another)
from_prop: object = unreal.PyTestObject().get_editor_property("struct")
local_print(unreal.PyTestStruct.cast(from_prop))
from_dict: unreal.PyTestStruct = unreal.PyTestStruct.cast({"int": 20, "string": "joe"}) # Partial mapping.
local_print(from_dict)
from_seq: unreal.PyTestStruct = unreal.PyTestStruct.cast([True, 20, 6.4, unreal.PyTestEnum.TWO, "joe"]) # Partial sequence.
local_print(from_seq)
from_tuple: unreal.PyTestStruct = unreal.PyTestStruct.cast(from_default.to_tuple())
local_print(from_tuple)
from_upcast: unreal.PyTestStruct = unreal.PyTestStruct.cast(unreal.PyTestChildStruct())
local_print(from_upcast)
from_downcast: unreal.PyTestChildStruct = unreal.PyTestChildStruct.cast(from_upcast)
local_print(from_downcast)
script_struct: unreal.ScriptStruct = unreal.PyTestStruct.static_struct()
print(script_struct)
# assign()
s: unreal.PyTestStruct = unreal.PyTestStruct()
assign_from_dict = s.assign({"string": "foo"})
local_print(assign_from_dict)
assign_from_seq = s.assign([True, 20, 6.4, unreal.PyTestEnum.TWO, "joe"])
local_print(assign_from_seq)
assign_from_other = s.assign(from_seq)
local_print(assign_from_other)
assign_from_derived = s.assign(unreal.PyTestChildStruct())
local_print(assign_from_derived)
values: typing.Tuple[object, ...] = s.to_tuple()
local_print(values)
prop: object = s.get_editor_property(name="string_array")
arr_prop: unreal.Array[str] = unreal.Array.cast(str, prop)
print(arr_prop)
s.set_editor_property(name="text", value=unreal.Text("some text"), notify_mode=unreal.PropertyAccessChangeNotifyMode.DEFAULT)
s.set_editor_properties({"int": 20, "string": "joe"})
exported_text: str = s.export_text()
s.import_text(content=exported_text)
def test_object_wrapper() -> None:
""" Ensures that the object wrapper class is correctly hinted. """
from_default: unreal.PyTestObject = unreal.PyTestObject()
local_print(from_default)
from_another: unreal.PyTestObject = unreal.PyTestObject.cast(from_default)
local_print(from_another)
from_prop: object = unreal.PyTestTypeHint().get_editor_property("object_prop")
local_print(unreal.PyTestObject.cast(from_prop))
from_new: unreal.PyTestObject = unreal.PyTestObject.cast(unreal.new_object(unreal.PyTestObject.static_class()))
local_print(from_new)
from_upcast: unreal.PyTestObject = unreal.PyTestObject.cast(unreal.new_object(unreal.PyTestChildObject.static_class())) # Upcast
local_print(from_upcast)
from_downcast: unreal.PyTestChildObject = unreal.PyTestChildObject.cast(from_upcast) # Downcast
local_print(from_downcast)
from_cdo: unreal.PyTestObject = unreal.PyTestObject.get_default_object()
local_print(from_cdo)
# static_class()/get_class()
static_cls: unreal.Class = unreal.PyTestObject.static_class()
local_print(static_cls)
instance_cls: unreal.Class = from_default.get_class()
local_print(instance_cls)
# get_outer()/get_typed_outer()/get_outermost()
outer: unreal.Object = from_default.get_outer()
local_print(outer)
typed_outer1: unreal.Object = from_default.get_typed_outer(unreal.Object)
typed_outer2: unreal.Object = from_default.get_typed_outer(unreal.Object.static_class())
local_print(typed_outer1)
local_print(typed_outer2)
outermost: unreal.Package = from_default.get_outermost()
local_print(outermost)
external_pkg: bool = from_default.is_package_external()
local_print(external_pkg)
pkg: unreal.Package = from_default.get_package()
local_print(pkg)
name: str = from_default.get_name()
local_print(name)
fname: unreal.Name = from_default.get_fname()
local_print(fname)
fullname: str = from_default.get_full_name()
local_print(fullname)
pathname: str = from_default.get_path_name()
local_print(pathname)
world: typing.Optional[unreal.World] = from_default.get_world()
local_print(world)
modified: bool = from_default.modify(True)
local_print(modified)
renamed: bool = unreal.PyTestObject().rename(name="NewName" + str(random.randint(0, 1000000)), outer=None)
local_print(renamed)
from_default.set_editor_property(name="String", value="foo", notify_mode=unreal.PropertyAccessChangeNotifyMode.DEFAULT)
from_default.set_editor_properties({"String": "foo", "Int": 14})
prop: object = from_default.get_editor_property(name="String")
local_print(prop)
retval: int = from_default.call_method("CallFuncBlueprintNative", (1,))
local_print(retval)
retval: int = from_default.call_method("CallFuncBlueprintNative", kwargs={"value": 1})
local_print(retval)
def test_fixed_array_wrapper() -> None:
a: unreal.FixedArray[float] = unreal.FixedArray(type=float, len=10)
local_print(a)
from_another: unreal.FixedArray[float] = unreal.FixedArray.cast(float, a)
local_print(from_another)
from_copy: unreal.FixedArray[float] = a.copy()
local_print(from_copy)
from_list: unreal.FixedArray[int] = unreal.FixedArray.cast(type=int, obj=[1, 2, 3])
local_print(from_list)
from_iterable: unreal.FixedArray[int] = unreal.FixedArray.cast(int, {0: "0", 1: "1"}.keys())
local_print(from_iterable)
# unreal.FixedArray can be passed as typing.Sequence/typing.Iterable type.
take_sequence_types(a)
take_iterable_types(a)
# __setitem__(self, index: int, value: T) -> None:
a[0] = 10.0
# __getitem__(self, index: int) -> T
flt: float = a[0]
local_print(flt)
# Check polymorphism.
poly: unreal.FixedArray[unreal.Object] = unreal.FixedArray(unreal.Object, 2)
poly[0] = unreal.Actor()
def test_array_wrapper() -> None:
a: unreal.Array[str] = unreal.Array(type=str)
from_another: unreal.Array[str] = unreal.Array.cast(type=str, obj=a)
local_print(from_another)
from_copy: unreal.Array[str] = a.copy()
local_print(from_copy)
from_prop: object = unreal.PyTestObject().get_editor_property("string_array")
local_print(unreal.Array.cast(str, from_prop))
from_list: unreal.Array[int] = unreal.Array.cast(int, [1.1, 2.2, 3.3]) # Converting floats into ints.
local_print(from_list)
from_tuple: unreal.Array[int] = unreal.Array.cast(int, (1, 2, 3))
local_print(from_tuple)
from_iterable: unreal.Array[int] = unreal.Array.cast(int, {0: "0", 1: "1"}.keys())
local_print(from_iterable)
# unreal.FixedArray can be passed as typing.Sequence/typing.Iterable type.
take_sequence_types(a)
take_iterable_types(a)
# append(self, value: T) -> None:
a.append("Hello")
# count(self, value: T) -> int:
local_print(a.count("b") == 1) # Print a bool
# extend(self, iterable: Iterable[T]) -> None:
a.extend(["a", "b"])
a.extend({"0": 10, "1": 20, "3": 30}.keys())
# index(self, value: T, start: int = 0, stop: int = -1) -> int
index: int = a.index(value="b", start=0, stop=-1)
local_print(index)
# insert(self, index: int, value: T) -> None:
a.insert(index=0, value="foo")
# pop(self, index: int = -1) -> T:
pop_value: str = a.pop(0)
local_print(pop_value)
# remove(self, value: T) -> None:
a.remove("b")
# reverse(self) -> None:
a.reverse()
# sort(self, key: Optional[Callable[T]] = None, reverse: bool = False) -> None:
a.sort(key=lambda item: len(item), reverse=True)
a.sort(key=None, reverse=True)
# resize(self, len: int) -> None:
a.resize(22)
# __setitem__/__getitem__
a[0] = "Bingo"
value: str = a[0]
local_print(value)
name_array: unreal.Array[typing.Union[str, unreal.Name]] = unreal.Array(unreal.Name)
name_array.append("hello") # string are convertible to unreal.Name().
text_array: unreal.Array[typing.Union[str, unreal.Text]] = unreal.Array(unreal.Text)
text_array.append("hello") # string are convertible to unreal.Text().
object_array: unreal.Array[typing.Optional[unreal.Object]] = unreal.Array(unreal.Object)
object_array.append(None)
object_array.append(unreal.PyTestObject()) # Polymorphism.
def test_set_wrapper() -> None:
s: unreal.Set[int] = unreal.Set(type=int)
from_another = unreal.Set.cast(type=int, obj=s)
local_print(from_another)
from_copy = s.copy()
local_print(from_copy)
from_prop: object = unreal.PyTestObject().get_editor_property("string_set")
local_print(unreal.Set.cast(str, from_prop))
from_set: unreal.Set[int] = unreal.Set.cast(int, {1, 2, 3})
local_print(from_set)
from_list: unreal.Set[int] = unreal.Set.cast(int, [1.1, 2.2, 3.3]) # Converting floats into ints.
local_print(from_list)
from_tuple: unreal.Set[int] = unreal.Set.cast(int, (1, 2, 3))
local_print(from_tuple)
from_iterable: unreal.Set[int] = unreal.Set.cast(int, {0: "0", 1: "1"}.keys())
local_print(from_iterable)
# unreal.Set can be passed to API expecting a typing.Set type.
take_set_types(s)
take_iterable_types(s)
s.add(0)
s.add(1)
s.discard(0)
s.add(0)
s.remove(0)
    popped_out: int = s.pop()
    local_print(popped_out)
s.clear()
s.difference_update({2, 3}, [3, 4])
diff: unreal.Set[int] = s.difference({2, 3}, [3, 4])
local_print(diff)
s.intersection_update([2, 3, 4, 8, 9])
intersection: unreal.Set[int] = s.intersection({1, 2}, [5, 6]) # Good
local_print(intersection)
s.symmetric_difference_update(from_iterable)
symmetric_diff: unreal.Set[int] = s.symmetric_difference(from_iterable)
local_print(symmetric_diff)
s.update({2, 3}, [3, 4], (6, 12, 13))
union: unreal.Set[int] = s.union({2, 3}, [3, 4], (6, 12, 13))
local_print(union)
disjoint: bool = s.isdisjoint(from_iterable)
local_print(disjoint)
subset: bool = s.issubset(from_iterable)
local_print(subset)
    superset: bool = s.issuperset(from_iterable)
local_print(superset)
# Check polymorphism.
poly: unreal.Set[unreal.Object] = unreal.Set(unreal.Object)
poly.add(unreal.Actor())
def test_map_wrapper() -> None:
local_print("== test_map_hinting ==")
# __init__(self, keys_type: type, values_type: type) -> None:
m: unreal.Map[int, str] = unreal.Map(key=int, value=str)
# unreal.Map can be passed to API expecting a typing.Mapping type.
take_mapping_types(m)
# __setitem__(self, key: KT, value: VT) -> None:
m[0] = "a"
m[1] = "b"
# __getitem__(self, key: KT) -> VT:
value: str = m.__getitem__(1)
local_print(value)
# cast(cls, keys_type: Type[KT], values_type: Type[VT], obj: Any) -> Map[KT, VT]:
m2: unreal.Map[int, str] = unreal.Map.cast(key=int, value=str, obj={0: "A", 1: "B"})
local_print(m2)
prop: object = unreal.PyTestObject().get_editor_property("string_int_map")
local_print(unreal.Map.cast(str, int, prop))
# __copy__(self) -> Map[KT, VT]:
m3: unreal.Map[int, str] = m2.copy()
local_print(m3)
# fromkeys(cls, iterable: Iterable[KT], value: Optional[VT] = None) -> Map[KT, VT]:
m4: unreal.Map[str, float] = unreal.Map.fromkeys(sequence=["A", "B", "C"], value=0.0)
m5: unreal.Map[str, float] = unreal.Map.fromkeys(("A", "B", "C"), 0.0)
m6: unreal.Map[str, float] = unreal.Map.fromkeys({"A": 0, "B": 1, "C": 2}, 0.0) # From the keys of a dict.
local_print(m4)
local_print(m5)
local_print(m6)
# get(self, key: KT, default: VT = ...) -> VT:
value: str = m.get(0)
local_print(value)
value: str = m.get(key=0, default="bar")
local_print(value)
# setdefault(self, key: KT, default: VT = ...) -> VT:
value: str = m.setdefault(99)
local_print(value)
value: str = m.setdefault(key=44, default="foo")
local_print(value)
# pop(self, key: KT, default: VT = ...) -> VT:
value: str = m.pop(99)
local_print(value)
value: str = m.pop(99, "joe")
local_print(value)
value: str = m.pop(key=99, default="joe")
local_print(value)
# popitem(self) -> tuple[KT, KV]:
item: tuple[int, str] = m.popitem()
local_print(item)
# update(self, pairs: Union[Iterable[Any], Mapping[KT, VT]]) -> None:
m.update([(10, "A"), (20, "B")]) # Iterable of tuples
m.update([[30, "C"], [40, "D"]]) # Iterable of 2-element list
m.update({50: "E", 60: "F"}) # Mapping of int, str.
m.update(m2) # Map[int, str]
# items(self) -> ItemsView[KT, VT]:
items: unreal.ItemsView[int, str] = m.items()
for i in items:
local_print(i)
# keys(self) -> Iterable[KT]:
keys: typing.Iterable[int] = m.keys()
for k in keys:
local_print(k)
# values(self) -> Iterable[VT]:
values: typing.Iterable[str] = m.values()
for v in values:
local_print(v)
# Check polymorphism.
poly: unreal.Map[typing.Union[unreal.Name, str], unreal.Object] = unreal.Map(unreal.Name, unreal.Object)
poly["joe"] = unreal.Actor() # Accept "joe" because a 'str' is convertible to a 'unreal.Name'
# Check using Optional and None.
int_obj_map: unreal.Map[int, typing.Optional[unreal.Object]] = unreal.Map(int, unreal.Object)
int_obj_map.__setitem__(0, None)
int_obj_map[10] = None
# Type coercion
name_int_map: unreal.Map[typing.Union[str, unreal.Name], int] = unreal.Map(unreal.Name, int)
name_int_map.__setitem__("hello", 1)
name_int_map["hello"] = 10
def test_reflected_types() -> None:
""" Ensures that UE reflected types are correctly hinted (from reflection). """
# Check for init method (Using a PyTestStruct because PyTestTypeHint uses the init from base UObject)
s: unreal.PyTestStruct = unreal.PyTestStruct(False, 0, 0.0, unreal.PyTestEnum.ONE,
"Str", "Name", "Text", unreal.FieldPath(), unreal.FieldPath(), ["StrArray"], {"StrSet"}, {"StrIntMap": 1})
local_print(s)
o = unreal.PyTestTypeHint()
str_const: str = unreal.PyTestTypeHint.STR_CONST
int_const: int = unreal.PyTestTypeHint.INT_CONST
local_print(str_const)
local_print(int_const)
bool_prop: bool = o.bool_prop
bool_retv: bool = o.check_bool_type_hints(bool_prop)
local_print(bool_retv)
int_prop: int = o.int_prop
int_retv: int = o.check_integer_type_hints(int_prop)
local_print(int_retv)
float_prop: float = o.float_prop
float_retv: float = o.check_float_type_hints(float_prop, 0.0)
local_print(float_retv)
enum_prop: unreal.PyTestEnum = o.enum_prop
local_print(enum_prop)
enum_prop = unreal.PyTestEnum.ONE
enum_retv: unreal.PyTestEnum = o.check_enum_type_hints(enum_prop)
local_print(enum_retv)
str_prop: str = o.string_prop
str_retv: str = o.check_string_type_hints(str_prop)
local_print(str_retv)
name_prop: unreal.Name = o.name_prop
name_retv: unreal.Name = o.check_name_type_hints(name_prop)
local_print(name_retv)
o.name_prop = "some str" # Type coercion from str to unreal.Name()
o.check_name_type_hints("Hi") # Type coercion from str to unreal.Name()
text_prop: unreal.Text = o.text_prop
text_retv: unreal.Text = o.check_text_type_hints(text_prop)
local_print(text_retv)
o.text_prop = "some str" # Type coercion from str to unreal.Text()
o.check_text_type_hints("Hi") # Type coercion from str to unreal.Text()
field_path_prop: unreal.FieldPath = o.field_path_prop
field_path_retv: unreal.FieldPath = o.check_field_path_type_hints(field_path_prop)
local_print(field_path_retv)
struct_prop: unreal.PyTestStruct = o.struct_prop
struct_retv: unreal.PyTestStruct = o.check_struct_type_hints(struct_prop)
unreal.PyTestObject().func_taking_py_test_struct([True]) # List can be coerced into struct
unreal.PyTestObject().func_taking_py_test_struct({"bool": True}) # Dict can be coerced into struct
unreal.PyTestObject().func_taking_py_test_struct({"bool": True, "int": 44, "float": 44.5}.values()) # Iterable can be coerced into struct
local_print(struct_retv)
object_prop: typing.Optional[unreal.PyTestObject] = o.object_prop
object_retv: typing.Optional[unreal.PyTestObject] = o.check_object_type_hints(object_prop)
o.object_prop = None # Property can be None.
o.object_prop = unreal.PyTestChildObject() # Can be a derived type.
local_print(object_retv)
# Native Python data structure to test unreal.Array(), unreal.Set() and unreal.Map() function parameters.
py_str_list: typing.List[str] = ["a", "b", "c"]
py_str_set: typing.Set[str] = {"a", "b", "c"}
py_str_tuple: typing.Tuple[str, str, str] = ("a", "b", "c")
py_str_int_dict: typing.Mapping[str, int] = {"a": 0, "b": 1, "c": 2}
py_int_str_dict: typing.Mapping[int, str] = {1: "a", 2: "b", 3: "c"}
py_obj_list: typing.List[unreal.PyTestObject] = [unreal.PyTestObject()]
py_obj_set: typing.Set[unreal.PyTestObject] = {unreal.PyTestObject()}
py_int_obj_dict: typing.Mapping[int, unreal.PyTestObject] = {0: unreal.PyTestObject(), 1: unreal.PyTestObject()}
string_array: unreal.Array[str] = o.str_array_prop
name_array: unreal.Array[unreal.Name] = unreal.Array(unreal.Name)
text_array: unreal.Array[unreal.Text] = unreal.Array(unreal.Text)
object_array: unreal.Array[unreal.Object] = unreal.Array(unreal.Object)
array_retv: unreal.Array[unreal.Text] = o.check_array_type_hints(string_array, name_array, text_array, object_array)
local_print(array_retv)
# Array[Name] requires Name obj. Ideally Array[T] API would coerce string into Name, but that doesn't look feasible.
o.name_array_prop.append(unreal.Name("foo"))
o.name_array_prop = name_array
# Array[Text] requires Text obj. Ideally Array[T] API would coerce string into Text, but that doesn't look feasible.
o.text_array_prop.append(unreal.Text("foo"))
o.text_array_prop = text_array
# Array[Object] property accepts None as element.
o.object_array_prop.append(None)
# Accepts a Python Tuple[] in place of unreal.Array[] + type coercion of str in place of Name/Text + polymorphism
o.check_array_type_hints(py_str_tuple, py_str_tuple, py_str_tuple, py_obj_list)
# Accepts a Python List[] in place of unreal.Array[] + type coercion of str in place of Name/Text + polymorphism
o.check_array_type_hints(py_str_list, py_str_list, py_str_list, py_obj_list)
# Accepts a Python Iterable[] in place of unreal.Array[] + type coercion of str in place of Name/Text + polymorphism
o.check_array_type_hints(py_str_int_dict.keys(), py_str_int_dict.keys(), py_str_int_dict.keys(), py_int_obj_dict.values())
# Accepts a Python Set[] in place of an unreal.Array[] + type coercion of str in place of Name/Text + polymorphism
o.check_array_type_hints(py_str_set, py_str_set, py_str_set, py_obj_set)
# Accepts empty Python data structures
o.check_array_type_hints([], set(), [], [])
string_set: unreal.Set[str] = o.set_prop
name_set: unreal.Set[unreal.Name] = unreal.Set(unreal.Name)
object_set: unreal.Set[unreal.Object] = unreal.Set(unreal.Object)
set_retv: unreal.Set[unreal.Name] = o.check_set_type_hints(string_set, name_set, object_set)
local_print(set_retv)
# Accepts a Python Set[] in place of unreal.Set[] + type coercion of str in place of Name + polymorphism
o.check_set_type_hints(py_str_set, py_str_set, py_obj_set)
# Accepts a Python List[] in place of unreal.Set[] + type coercion of str in place of Name + polymorphism
o.check_set_type_hints(py_str_list, py_str_list, py_obj_list)
# Accepts a Python Iterable[] in place of unreal.Set[] + type coercion of str in place of Name + polymorphism
o.check_set_type_hints(py_str_int_dict.keys(), py_str_int_dict.keys(), py_int_obj_dict.values())
# Accepts empty Python data structures
o.check_set_type_hints([], set(), [])
int_str_map: unreal.Map[int, str] = o.map_prop
int_name_map: unreal.Map[int, unreal.Name] = unreal.Map(int, unreal.Name)
int_text_map: unreal.Map[int, unreal.Text] = unreal.Map(int, unreal.Text)
int_obj_map: unreal.Map[int, unreal.Object] = unreal.Map(int, unreal.Object)
map_retv: unreal.Map[str, typing.Optional[unreal.Object]] = o.check_map_type_hints(int_str_map, int_name_map, int_text_map, int_obj_map)
local_print(map_retv)
# Accepts a Python Dict[] in place of unreal.Map[] + type coercion of str in place of Name/Text + polymorphism
o.check_map_type_hints(py_int_str_dict, py_int_str_dict, py_int_str_dict, py_int_obj_dict)
# Accepts a list of tuple + type coercion of str in place of Name/Text + polymorphism
o.check_map_type_hints([(1, "A"), (2, "B")], [(1, "A"), (2, "B")], [(1, "A"), (2, "B")], py_int_obj_dict)
# Accepts a list of 2-element list + type coercion of str in place of Name/Text + polymorphism
o.check_map_type_hints([[1, "A"], [2, "B"]], [[1, "A"], [2, "B"]], [[1, "A"], [2, "B"]], py_int_obj_dict)
# Accepts empty Python data structures
o.check_map_type_hints({}, {}, {}, {})
o.check_map_type_hints([[1, "A"], [2, "B"]], [[1, "A"], [2, "B"]], [[1, "A"], [2, "B"]], {2: None})
delegate_callable = DelegateCallable()
delegate_prop: unreal.PyTestDelegate = o.delegate_prop
delegate_retv: unreal.PyTestDelegate = o.check_delegate_type_hints(delegate_prop)
if not delegate_retv.is_bound():
delegate_prop.bind_callable(delegate_callable.delegate_callback)
delegate_prop.unbind()
multicast_delegate_prop: unreal.PyTestMulticastDelegate = o.multicast_delegate_prop
o.multicast_delegate_prop = unreal.PyTestMulticastDelegate()
o.multicast_delegate_prop = multicast_delegate_prop
bool_retv: bool = unreal.PyTestTypeHint.check_static_function(True, 0, 0.1, "")
tuple_retv: typing.Tuple[int, str] = unreal.PyTestTypeHint.check_tuple_return_type("foo")
local_print(bool_retv)
local_print(tuple_retv)
def test_core_module() -> None:
"""
    This function is here as a reminder that some types are defined in unreal_core.py and are
    pulled into the stub file; for example, uclass(), ustruct(), uenum(), uvalue(), uproperty()
    and ufunction() are methods defined in that file. They are not hinted yet because hinting
    could not be turned off for that file. Ideally, that file will be manually hinted once we set
    a minimum Python version of 3.9. Currently, users are free to recompile the engine against a
    3.x version.
"""
pass
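# A minimal sketch (added for illustration) of the decorator pattern provided by unreal_core.py
# and mentioned in test_core_module() above. The class, property and function names below are
# invented for this example; they are not part of the engine API.
@unreal.uclass()
class TypeHintSketchObject(unreal.Object):
    sketch_int = unreal.uproperty(int)

    @unreal.ufunction(ret=int, params=[int])
    def double_it(self, value):
        return value * 2

_sketch_obj = TypeHintSketchObject()
_sketch_obj.sketch_int = 21
local_print(_sketch_obj.double_it(_sketch_obj.sketch_int))  # Expected to print 42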
def test_slow_task() -> None:
""" Ensure the manually hinted SlowTask API is correctly hinted (In PyCore.cpp). """
total_work: int = 5
work_per_frame: float = 1.0
task: unreal.ScopedSlowTask
with unreal.ScopedSlowTask(work=total_work, desc="Testing Hinting") as task:
task.make_dialog(can_cancel=True, allow_in_pie=False)
for _ in range(total_work):
if task.should_cancel():
break
task.enter_progress_frame(work_per_frame, "Doing some work...")
with unreal.ScopedSlowTask(total_work, "Testing Hinting") as task:
task.make_dialog_delayed(1.0, can_cancel=True, allow_in_pie=False)
for _ in range(total_work):
if task.should_cancel():
break
task.enter_progress_frame(work=work_per_frame, desc="Doing some work...")
def test_py_core_methods() -> None:
""" Ensure the methods manually hinted in PyCore.cpp are correctly hinted. """
# log(arg: Any) -> None
unreal.log(0)
unreal.log("msg")
unreal.log((0, 1, 2))
# log_warning(arg: Any) -> None
unreal.AutomationLibrary.add_expected_log_error(str(1), 1, True)
unreal.log_warning(1)
unreal.AutomationLibrary.add_expected_log_error("msg", 1, True)
unreal.log_warning("msg")
unreal.AutomationLibrary.add_expected_log_error("0, 1, 2", 1, False)
unreal.log_warning([0, 1, 2])
# log_error(arg: Any) -> None
unreal.AutomationLibrary.add_expected_log_error(str(42), 1, True)
unreal.log_error(42)
unreal.AutomationLibrary.add_expected_log_error("msg", 1, True)
unreal.log_error("msg")
unreal.AutomationLibrary.add_expected_log_error(".a.: 10, .b.: 20", 1, False)
unreal.log_error({"a": 10, "b": 20})
# log_flush() -> None
unreal.log_flush()
# reload()/load_module()
module_name = "core" # This will load/reload "unreal_core.py"
unreal.reload(module_name)
unreal.load_module(module_name)
# new_object
valid_obj: unreal.PyTestObject = unreal.new_object(unreal.PyTestObject.static_class(), None, "MyObject", None)
local_print(valid_obj)
    # Cursory warnings for the next section, which uses a non-existent Blueprint located at /project/ - comment out the following suppressions and example warnings if you have an asset saved as such.
unreal.AutomationLibrary.add_expected_log_error("Failed to load '/project/': Can't find file.", 0, False)
unreal.log_warning("[FAKE] Failed to load '/project/': Can't find file.")
unreal.AutomationLibrary.add_expected_log_error("Failed to find object 'Object /project/.ActorBP'", 0, False)
unreal.log_warning("[FAKE] Failed to find object 'Object /project/.ActorBP'")
# find_object()/load_object() - The specified asset may not exist, but type match and would work if the Blueprint asset existed.
loaded_object: typing.Optional[unreal.Object] = unreal.load_object(None, "/project/.ActorBP", unreal.Object.static_class()) # outer = None -> transient package.
local_print(loaded_object)
found_object: typing.Optional[unreal.Object] = unreal.find_object(None, "/project/.ActorBP", unreal.Object.static_class()) # outer = None -> transient package.
local_print(found_object)
# load_class() - The specified class may not exist, but types match and this would work if Blueprint asset existed.
loaded_class: typing.Optional[unreal.Class] = unreal.load_class(None, "Blueprint'/project/.ActorBP_C'") # outer = None -> transient package.
local_print(loaded_class)
# find_asset()/load_asset() - The specified asset doesn't exist... it just to test type hinting.
loaded_asset: typing.Optional[unreal.Blueprint] = unreal.load_asset("/project/.ActorBP", unreal.Blueprint.static_class(), follow_redirectors=True)
local_print(loaded_asset)
found_asset: typing.Optional[unreal.Blueprint] = unreal.find_asset("/project/.ActorBP", unreal.Blueprint.static_class(), follow_redirectors=True)
local_print(found_asset)
# find_package()/load_package()
loaded_pkg: typing.Optional[unreal.Package] = unreal.load_package("/project/")
local_print(loaded_pkg)
found_pkg: typing.Optional[unreal.Package] = unreal.find_package("/project/")
local_print(found_pkg)
# get_default_object()
cdo: typing.Optional[unreal.PyTestObject] = unreal.get_default_object(unreal.PyTestObject.static_class())
local_print(cdo)
# purge_object_references()
unreal.purge_object_references(unreal.PyTestObject(), include_inners=True)
    # generate_class()/generate_struct()/generate_enum() -> These are used by the @unreal.uenum, @unreal.ustruct and @unreal.uclass decorators
    # that are defined in the unreal_core.py module. Normally, users shouldn't need to call them, but they are type checked here.
unreal.generate_enum(unreal.PyTestEnum)
unreal.generate_struct(unreal.PyTestStruct)
unreal.generate_class(unreal.PyTestObject)
# get_type_from_class()/get_type_from_struct()/get_type_from_enum()
cls_type: type = unreal.get_type_from_class(unreal.PyTestObject.static_class())
local_print(cls_type)
struct_type: type = unreal.get_type_from_struct(unreal.PyTestStruct.static_struct())
local_print(struct_type)
enum_type: type = unreal.get_type_from_enum(unreal.PyTestEnum.static_enum())
local_print(enum_type)
# register_python_shutdown_callback()/unregister_python_shutdown_callback()
def shutdown_callback() -> None:
local_print("goodbye!")
opaque_handle = unreal.register_python_shutdown_callback(shutdown_callback)
unreal.unregister_python_shutdown_callback(opaque_handle)
    # NSLOCTEXT/LOCTABLE -> 'StrTable' is a 'String Table' asset that can be created in the Content Browser. If you have created the string table asset, please comment out the following suppression and example warning.
unreal.AutomationLibrary.add_expected_log_error("Failed to find string table entry for '/project/' 'Foo'. Did you forget to add a string table redirector?", 0, False)
unreal.log_warning("[FAKE] Failed to find string table entry for '/project/' 'Foo'. Did you forget to add a string table redirector?")
found_text: unreal.Text = unreal.LOCTABLE("/project/", "Foo")
local_print(found_text)
loc_text: unreal.Text = unreal.NSLOCTEXT("MyNamespace", "NewKey2", "NewKeyValue")
local_print(loc_text)
# is_editor()
editor_runtime: bool = unreal.is_editor()
local_print(editor_runtime)
# get_interpreter_executable_path()
path: str = unreal.get_interpreter_executable_path()
local_print(path)
# Object iterator (passing a type)
object_it = unreal.ObjectIterator(unreal.PyTestObject)
visited_obj: unreal.PyTestObject
for visited_obj in object_it:
local_print(visited_obj)
# Object iterator (passing a unreal.Class)
static_mesh_it = unreal.ObjectIterator(unreal.StaticMesh.static_class())
visited_mesh_object: unreal.StaticMesh
for visited_mesh_object in static_mesh_it:
local_print(visited_mesh_object)
# Class iterator
class_it = unreal.ClassIterator(unreal.StaticMeshActor)
visited_class: unreal.Class
for visited_class in class_it:
local_print(visited_class)
# Struct iterator
struct_it = unreal.StructIterator(unreal.PyTestStruct)
visited_script_struct: unreal.ScriptStruct
for visited_script_struct in struct_it:
local_print(visited_script_struct)
# Type iterator
type_it = unreal.TypeIterator(unreal.PyTestObject)
visited_type: type
for visited_type in type_it:
local_print(visited_type)
def test_py_editor_methods() -> None:
""" Ensure the methods manually hinted in PyEditor.cpp are correctly hinted. """
engine_ss1: typing.Optional[unreal.EngineSubsystem] = unreal.get_engine_subsystem(unreal.SubobjectDataSubsystem) # Using type
local_print(engine_ss1)
engine_ss2: typing.Optional[unreal.EngineSubsystem] = unreal.get_engine_subsystem(unreal.SubobjectDataSubsystem.static_class()) # Using Class
local_print(engine_ss2)
editor_ss1: typing.Optional[unreal.EditorSubsystem] = unreal.get_editor_subsystem(unreal.EditorActorSubsystem) # Using type.
local_print(editor_ss1)
editor_ss2: typing.Optional[unreal.EditorSubsystem] = unreal.get_editor_subsystem(unreal.EditorActorSubsystem.static_class()) # Using Class
local_print(editor_ss2)
with unreal.ScopedEditorTransaction(desc="My Transaction"):
local_print("do something")
def test_py_slate_methods() -> None:
def tick_callable(dt: float) -> None:
local_print(dt)
# Using a Python callable
pre_tick_handle: object = unreal.register_slate_pre_tick_callback(tick_callable)
post_tick_handle: object = unreal.register_slate_post_tick_callback(tick_callable)
unreal.unregister_slate_pre_tick_callback(pre_tick_handle)
unreal.unregister_slate_post_tick_callback(post_tick_handle)
# Using an unreal delegate.
o = unreal.PyTestTypeHint()
o.slate_tick_delegate.bind_callable(tick_callable)
pre_tick_handle = unreal.register_slate_pre_tick_callback(o.slate_tick_delegate)
post_tick_handle = unreal.register_slate_post_tick_callback(o.slate_tick_delegate)
unreal.unregister_slate_pre_tick_callback(pre_tick_handle)
unreal.unregister_slate_post_tick_callback(post_tick_handle)
o.slate_tick_delegate.unbind()
# Always false. The enclosed instructions are type-verified, but never executed.
if o.slate_tick_delegate.is_bound():
opaque_window_handle: object = 0 # Using an int, but this could be any other object type.
unreal.parent_external_window_to_slate(opaque_window_handle, unreal.SlateParentWindowSearchMethod.ACTIVE_WINDOW)
def test_py_engine_methods() -> None:
# The function 'get_blueprint_generated_types' accepts a user-defined 'Enumeration' asset, which can be created from the Content Browser.
asset_subsystem : typing.Optional[unreal.EditorAssetSubsystem] = unreal.get_editor_subsystem(unreal.EditorAssetSubsystem)
if asset_subsystem is not None and asset_subsystem.does_asset_exist("/project/"):
from_str = unreal.get_blueprint_generated_types("/project/")
local_print(from_str)
from_list = unreal.get_blueprint_generated_types(["/project/"])
local_print(from_list)
from_set = unreal.get_blueprint_generated_types({"/project/"})
local_print(from_set)
from_tuple = unreal.get_blueprint_generated_types(("/project/",))
local_print(from_tuple)
from_iterable = unreal.get_blueprint_generated_types({"/project/": 0}.keys())
local_print(from_iterable)
# Iterate all actors (testing the default param)
editor_subsystem: typing.Optional[unreal.UnrealEditorSubsystem] = unreal.get_editor_subsystem(unreal.UnrealEditorSubsystem)
if editor_subsystem is not None:
world: typing.Optional[unreal.World] = editor_subsystem.get_editor_world()
if world:
actor_it: unreal.ActorIterator = unreal.ActorIterator(world)
for actor in actor_it:
local_print(actor)
# Iterate a specific type of actor.
selected_actor_it: unreal.SelectedActorIterator = unreal.SelectedActorIterator(world, unreal.StaticMeshActor)
static_mesh_actor: unreal.StaticMeshActor
for static_mesh_actor in selected_actor_it:
local_print(static_mesh_actor)
unreal.ValueDef(3, {"DisplayName": "Simple Value"})
unreal.PropertyDef(int, {"DisplayName": "Simple Prop"}, None, None)
unreal.FunctionDef(unreal.log, {"DisplayName": "Simple Func"}, None, None, None, None, None, None, None)
# Ensure we can declare and execute methods hinting Unreal generic containers. With Python 3.9.7, a container variable hinted inside a function
# doesn't seem to have the same requirements as one hinted globally, as a function parameter or as a return value. These tests were added to ensure
# the Unreal containers can be hinted in all cases.
global_fixed_array: unreal.FixedArray[int] = unreal.FixedArray(int, 3)
def test_fixed_array_hints(a: unreal.FixedArray[int]) -> unreal.FixedArray[str]:
local_print(a)
return unreal.FixedArray.cast(str, ["A", "B", "C"])
global_array: unreal.Array[int] = unreal.Array(int)
def test_array_hints(a: unreal.Array[int]) -> unreal.Array[str]:
local_print(a)
return unreal.Array.cast(str, ["A", "B", "C"])
global_set: unreal.Set[int] = unreal.Set(int)
def test_set_hints(s: unreal.Set[int]) -> unreal.Set[str]:
local_print(s)
return unreal.Set.cast(str, {"A", "B", "C"})
global_map: unreal.Map[int, str] = unreal.Map(int, str)
def test_map_hints(m: unreal.Map[int, str]) -> unreal.Map[int, str]:
local_print(m)
return unreal.Map.cast(int, str, {0:"A", 1:"B"})
if __name__ == "__main__":
test_name_wrapper()
test_text_wrapper()
test_delegate_wrapper()
test_multicast_delegate_wrapper()
test_field_type_wrapper()
test_enum_wrapper()
test_struct_wrapper()
test_object_wrapper()
test_fixed_array_hints(global_fixed_array)
test_fixed_array_wrapper()
test_array_wrapper()
test_array_hints(global_array)
test_set_wrapper()
test_set_hints(global_set)
test_map_wrapper()
test_map_hints(global_map)
test_reflected_types()
test_core_module()
test_slow_task()
test_py_core_methods()
test_py_editor_methods()
test_py_slate_methods()
test_py_engine_methods()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 11:46:18 2024
@author: WillQuantique
"""
import unreal
import time
import random as rd
import sys
sys.path.append("D:/Lab Project/UE_5.3/project/")
import numpy as np
def reset_scene(sequence_path):
"""
Deletes the specified sequence from the asset library and removes the MetaHuman actor and camera actor from the scene.
Args:
sequence_path (str): The path to the sequence asset in the asset library.
Returns:
None
"""
# Delete the sequence from the asset library
sequence_asset = unreal.EditorAssetLibrary.load_asset(sequence_path)
if sequence_asset:
unreal.EditorAssetLibrary.delete_asset(sequence_path)
print(f"Sequence '{sequence_path}' deleted from the asset library.")
else:
print(f"Sequence '{sequence_path}' not found in the asset library.")
all_actors = unreal.EditorLevelLibrary.get_all_level_actors()
for actor in all_actors:
unreal.log(actor.get_name())
if "F" in actor.get_name() or "Rig" in actor.get_name() or actor.get_class().get_name() == "CineCameraActor" or "H" in actor.get_name():
unreal.EditorLevelLibrary.destroy_actor(actor)
print(f"Deleted actor: {actor.get_name()}")
def SetupCineCameraActor(location=unreal.Vector(0, 0, 0), rotation=unreal.Rotator(0, 0, 0)):
# adapted from https://github.com/project/.py
"""
Parameters
----------
location : Unreal vector for camera location
The default is unreal.Vector(0, 0, 0).
rotation : Unreal rotator for camera rotation
The default is unreal.Rotator(0, 0, 0).
Returns
-------
camera_actor : Unreal actor
The camera set with given rotation, position
"""
# Spawn a Camera Actor in the World
world = unreal.EditorLevelLibrary.get_editor_world()
camera_actor = unreal.EditorLevelLibrary.spawn_actor_from_class(unreal.CineCameraActor, location, rotation)
# create an instance of camera settings and set it to manual focus method
settings = unreal.CameraFocusSettings()
settings.focus_method = unreal.CameraFocusMethod.MANUAL
# create the camera and pass the settings to it
ccc = camera_actor.get_cine_camera_component()
ccc.set_editor_property("focus_settings", settings)
return camera_actor
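# Usage sketch: the call below mirrors how full_run() creates its camera further down in this
# script; the exact location and rotation values are only an illustration.
# camera = SetupCineCameraActor(unreal.Vector(0, 80, 149), unreal.Rotator(0, 0, -90))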
def CreateSequence(sequence_name, camera, length_frames=1, package_path='/project/'):
    '''
    Args:
        sequence_name (str): The name of the sequence to be created.
        camera (unreal.CineCameraActor): Camera actor to bind; note that the first CineCameraActor found in the level replaces this argument below.
        length_frames (int): The number of frames in the camera cut section.
        package_path (str): The location for the new sequence.
    Returns:
        unreal.LevelSequence: The created level sequence.
    '''
all_actors = unreal.EditorLevelLibrary.get_all_level_actors()
# Create the sequence asset in the desired location
sequence = unreal.AssetToolsHelpers.get_asset_tools().create_asset(
sequence_name,
package_path,
unreal.LevelSequence,
unreal.LevelSequenceFactoryNew())
sequence.set_playback_end(length_frames)
movie_scene = sequence.get_movie_scene()
for actor in all_actors:
unreal.log(actor.get_class().get_name())
if actor.get_class().get_name() == "CineCameraActor":
camera = actor
break
cam_binding = sequence.add_possessable(camera)
for actor in all_actors:
actor_binding = sequence.add_possessable(actor)
ccc = camera.get_cine_camera_component()
filmback = unreal.CameraFilmbackSettings()
filmback.sensor_width = 16
filmback.sensor_height = 16
ccc.set_editor_property("filmback", filmback)
pp_settings = ccc.get_editor_property('post_process_settings')
pp_settings.motion_blur_amount = 0
ccc.set_editor_property('post_process_settings', pp_settings)
camera_cut_track = sequence.add_master_track(unreal.MovieSceneCameraCutTrack)
# Add a camera cut track for this camera
# Make sure the camera cut is stretched to the -1 mark
camera_cut_section = camera_cut_track.add_section()
camera_cut_section.set_start_frame(-1)
camera_cut_section.set_end_frame(length_frames)
# bind the camera
camera_binding_id = unreal.MovieSceneObjectBindingID()
camera_binding_id.set_editor_property("Guid", cam_binding.get_id())
camera_cut_section.set_editor_property("CameraBindingID", camera_binding_id)
# Add a current focal length track to the cine camera component
camera_component = camera.get_cine_camera_component()
camera_component_binding = sequence.add_possessable(camera_component)
camera_component_binding.set_parent(cam_binding)
focal_length_track = camera_component_binding.add_track(unreal.MovieSceneFloatTrack)
focal_length_track.set_property_name_and_path('CurrentFocalLength', 'CurrentFocalLength')
focal_length_section = focal_length_track.add_section()
focal_length_section.set_start_frame_bounded(0)
focal_length_section.set_end_frame_bounded(length_frames)
# add a transform track for later manipulation
transform_track = cam_binding.add_track(unreal.MovieScene3DTransformTrack)
transform_track.set_property_name_and_path('Transform', 'Transform')
transform_section = transform_track.add_section()
transform_section.set_start_frame_bounded(0)
transform_section.set_end_frame_bounded(length_frames)
# add track for focus distance
# Add a track for the manual focus distance
focus_distance_track = camera_component_binding.add_track(unreal.MovieSceneFloatTrack)
focus_distance_track.set_property_name_and_path("ManualFocusDistance", "FocusSettings.ManualFocusDistance")
focus_distance_section = focus_distance_track.add_section()
focus_distance_section.set_start_frame_bounded(0)
focus_distance_section.set_end_frame_bounded(length_frames)
return sequence
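# Usage sketch (name and frame count are hypothetical): creates a level sequence with a camera
# cut, focal length, transform and manual-focus tracks already set up on the found camera.
# sequence = CreateSequence("sequence_FA003", camera, length_frames=120)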
def add_metahuman_components_to_sequence(sequence, metahuman_actor, sequence_path):
# Retrieve all components attached to the metahuman actor
unreal.log("before world")
editor_subsystem = unreal.get_editor_subsystem(unreal.UnrealEditorSubsystem)
sequence_path = ensure_package_name_format(sequence_path)
sequence_to_activate = unreal.load_asset(sequence_path)
print("under_world")
s = unreal.LevelSequenceEditorBlueprintLibrary.open_level_sequence(sequence_to_activate)
print("overworld")
# Use the new recommended method to get the editor world
world = editor_subsystem.get_editor_world()
unreal.log("world")
skeletal_mesh_components = metahuman_actor.get_components_by_class(unreal.SkeletalMeshComponent)
unreal.log("mesh")
# sequence = unreal.LevelSequenceEditorBlueprintLibrary.get_current_level_sequence()
unreal.log("sequence")
for skeletal_mesh_component in skeletal_mesh_components:
# Assuming naming convention to identify relevant skeletal meshes for control rigs
if "face" in skeletal_mesh_component.get_name().lower():
rig = unreal.load_asset('/project/')
skel = sequence.add_possessable(skeletal_mesh_component)
rig_class = rig.get_control_rig_class()
rig_track = unreal.ControlRigSequencerLibrary.find_or_create_control_rig_track(world, sequence, rig_class,skel)
elif "body" in skeletal_mesh_component.get_name().lower():
rig = unreal.load_asset("/project/")
skel = sequence.add_possessable(skeletal_mesh_component)
rig_class = rig.get_control_rig_class()
rig_track = unreal.ControlRigSequencerLibrary.find_or_create_control_rig_track(world, sequence, rig_class,skel)
def find_delta_z(rig, sequence):
control_name = "CTRL_C_jaw"
frame_number = unreal.FrameNumber(0)
pos = unreal.ControlRigSequencerLibrary.get_control_rig_world_transform(sequence, rig, control_name, frame_number)
return 142.458249 - pos.translation.z
def LoadBlueprint(blueprint_path: str, loc: unreal.Vector = unreal.Vector(0, 0, 0),
rot: unreal.Rotator = unreal.Rotator(0, 0, 0), size = "n"):
"""
Parameters
----------
blueprint_path : str
Unreal path to the blueprint, e.g. Game/project/
loc : unreal.Vector, optional
Desired Position in absolute coordinates The default is unreal.Vector(0,0, 0).
rot : unreal.Rotator, optional
Desired Rotation The default is unreal.Rotator(0, 0, 0).
Returns
-------
actor : unreal.Actor
Actor spawned from the blueprint, or None if the asset failed to load.
"""
asset = unreal.EditorAssetLibrary.load_asset(blueprint_path)
if asset is None:
print(f"Failed to load asset at {blueprint_path}")
return None
if size == "g":
loc.z -= 10
elif size == "tg":
loc.z-= 19.32
actor = unreal.EditorLevelLibrary.spawn_actor_from_object(asset, loc, rot)
return actor
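# Usage sketch, mirroring how full_run() spawns a MetaHuman below ('iden' and the path are
# placeholders taken from that call):
# MH = LoadBlueprint(f"/project/{iden}/BP_{iden}", unreal.Vector(0, 0, 0), size="g")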
def MovieQueueRender(u_level_file, u_level_seq_file, u_preset_file, job_name: str, sequence=None):
"""
Parameters
----------
u_level_file : Unreal path
Path to level
u_level_seq_file : Unreal path
Path to sequence
u_preset_file : Unreal path
Path to movie render presets
Returns
-------
None.
"""
subsystem = unreal.get_editor_subsystem(unreal.MoviePipelineQueueSubsystem)
executor = unreal.MoviePipelinePIEExecutor()
queue = subsystem.get_queue()
#queue.delete_all_jobs()
# config render job with movie pipeline config
job = queue.allocate_new_job(unreal.MoviePipelineExecutorJob)
job.job_name = job_name
job.map = unreal.SoftObjectPath(u_level_file)
job.sequence = unreal.SoftObjectPath(u_level_seq_file)
preset = unreal.EditorAssetLibrary.find_asset_data(u_preset_file).get_asset()
job.set_configuration(preset)
if sequence is not None:
print(unreal.MoviePipelineLibrary.update_job_shot_list_from_sequence(sequence, job))
subsystem.render_queue_with_executor_instance(executor)
print("###################################################\n")
print("rendered")
print("###################################################\n")
def get_file_paths(folder_path, limit=0):
file_paths = []
# List every asset under the given folder
asset_paths = unreal.EditorAssetLibrary.list_assets(folder_path)
cnt = 0
# Walk the asset paths and append the valid ones to the list
for asset_path in asset_paths:
cnt += 1
if unreal.EditorAssetLibrary.does_asset_exist(asset_path):
file_paths.append(asset_path)
if cnt == limit:
break
return file_paths
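# Usage sketch (folder path placeholder as elsewhere in this script): collects existing asset
# paths under the folder, stopping once 'limit' assets have been scanned (limit=0 means no cap).
# pose_paths = get_file_paths("/project/", limit=10)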
def ensure_package_name_format(object_path):
"""
Converts an object path to a package name if necessary.
Unreal Engine requires package names instead of object paths for certain operations.
Args:
object_path (str): The original object path.
Returns:
str: The corrected package name.
"""
if object_path.startswith('/Game/') and ('.' in object_path):
# Converts object path to package name by removing the part after the last dot
return object_path.rsplit('.', 1)[0]
return object_path
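# Example (hypothetical asset): an object path such as "/Game/Sequences/sequence_FA003.sequence_FA003"
# is reduced to its package name "/Game/Sequences/sequence_FA003"; anything else is returned unchanged.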
def control_by_control(pose_asset_path, sequence, frame=1, size="n", delta = 0):
print("Control by control ongoing")
pose_asset_path = ensure_package_name_format(pose_asset_path)
pose_asset = unreal.EditorAssetLibrary.load_asset(pose_asset_path)
rigProxies = unreal.ControlRigSequencerLibrary.get_control_rigs(sequence)
# Filter rigs for specific names
rigs = [rigProxy.control_rig for rigProxy in rigProxies if
rigProxy.control_rig.get_name() in ["Face_ControlBoard_CtrlRig", "MetaHuman_ControlRig"]]
delta_z = find_delta_z(rigs[0], sequence)
print(f"###############{delta_z}#######################")
# Prepare all transformations in a list comprehension
transformations = [
(
rig,
control.name,
unreal.FrameNumber(frame),
unreal.Transform(
location={
"n": control.global_transform.translation,
"g": control.global_transform.translation,# + unreal.Vector(0, 0, 0),
"p": control.global_transform.translation,# - unreal.Vector(0, 0, delta_z/2),
"tg": control.global_transform.translation, # + unreal.Vector(0, 0, delta_z)
}[size],
rotation=unreal.Rotator(control.global_transform.rotation.x, control.global_transform.rotation.y,
control.global_transform.rotation.z), # from quat to rotator
scale=control.global_transform.scale3d
)
)
for rig in rigs
for control in pose_asset.pose.copy_of_controls
if "teeth" not in str(control.name)
]
# Apply transformations
for rig, control_name, frame_number, control_transform in transformations:
control_transform.translation.x += delta*100
unreal.ControlRigSequencerLibrary.set_control_rig_world_transform(sequence, rig, control_name, frame_number,
control_transform)
def add_key_to_position_track(sequence, transform, frame):
for binding in sequence.get_bindings():
for track in binding.get_tracks():
if track.get_display_name() == "Transform":
transform_section = track.get_sections()[0]
break
time = unreal.FrameNumber(frame)
all_channels = transform_section.get_all_channels()
transform_dict = {
"Location.X": transform.translation.x,
"Location.Y": transform.translation.y,
"Location.Z": transform.translation.z,
"Rotation.X": transform.rotation.x,
"Rotation.Y": transform.rotation.y,
"Rotation.Z": transform.rotation.z
}
for channel in all_channels:
if str(channel.channel_name) in transform_dict:
value = transform_dict[str(channel.channel_name)]
channel.add_key(time, value)
def add_key_to_focal_track(sequence, foc, frame):
for binding in sequence.get_bindings():
for track in binding.get_tracks():
if track.get_display_name() == 'CurrentFocalLength':
transform_section = track.get_sections()[0]
break
time = unreal.FrameNumber(frame)
all_channels = transform_section.get_all_channels()
for channel in all_channels:
channel.add_key(time, foc)
def add_key_to_focus_track(sequence, foc, frame):
for binding in sequence.get_bindings():
for track in binding.get_tracks():
if track.get_display_name() == "ManualFocusDistance":
transform_section = track.get_sections()[0]
break
time = unreal.FrameNumber(frame)
all_channels = transform_section.get_all_channels()
for channel in all_channels:
channel.add_key(time, foc)
def make_pos_rot_from_angle_and_foc(focal, focus, angle_y, angle_z):
dist = 2.6*focal # determines the right distance from the camera, given the focal length and the camera sensor size, so the face fills the picture
cam_angle_y, cam_angle_z, cam_x, cam_y, cam_z = camera_position_angles(angle_y, angle_z, dist)
return {"pos": (cam_x, cam_y, cam_z),
"rot": (0, cam_angle_y, cam_angle_z, 0),
"focal": focal,
"focus": dist + focus
}
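# Worked example (values only for illustration): focal=50, focus=0, angle_y=0, angle_z=0 gives
# dist = 2.6 * 50 = 130, so "pos" is roughly (-130, 0, 149), "rot" is (0, 0, 0, 0),
# "focal" is 50 and "focus" is 130.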
def camera_position_angles(angle_y, angle_z, distance):
# Convert angles from degrees to radians
angle_z_rad = np.radians(angle_z) # Rotation around Z-axis affects X-Y plane horizontally
angle_y_rad = np.radians(angle_y) # Rotation around Y-axis affects X-Z plane vertically
# Calculate coordinates based on angles
x = distance * np.cos(angle_z_rad) * np.cos(angle_y_rad)
y = distance * np.sin(angle_z_rad) * np.cos(angle_y_rad)
z = distance * np.sin(angle_y_rad)
# If positive Y-angle means pointing up, Z should decrease with increasing Y-angle
final_z = 149 - z # Subtract from 149 to adjust Z downwards as Y-angle increases
return angle_y, angle_z, -x, -y, final_z,
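# Worked example: camera_position_angles(0, 0, 130) returns (0, 0, -130.0, 0.0, 149.0),
# i.e. the camera sits 130 units in front of the face (negative X) at the reference height of 149.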
grand_petit = {
"FA003": "p",
"FA004": "p",
"FA006": "p",
"FA008": "p",
"FA015": "p",
"FA016": "p",
"FB001": "g",
"FB005": "g",
"FB006": "p",
"FB008": "g",#
"FB009": "g",
"FB010": "g",
"FB011": "g",
"FB012": "g",
"FB013": "g",
"FB014": "p",
"FN004": "g",#
"FN008": "g",
"FN015": "g",
"FN016": "g",
"FN017": "g",
"HA001": "g",
"HA003": "g",
"HA004": "g",
"HA005": "g",
"HA006": "g",
"HA007": "g",
"HA008": "g",
"HA010": "g",
"HA011": "g",
"HA012": "g",
"HA013": "g",
"HA014": "g",
"HA015": "g",
"HA016": "g",
"HA017": "g",
"HB001": "tg",
"HB002": "g",
"HB003": "g",
"HB004": "g",
"HB005": "g",
"HB006": "g",
"HB007": "g",
"HB008": "g",
"HB009": "g",
"HB010": "g",
"HB011": "g",
"HB012": "g",
"HB013": "g",
"HB014": "g",
"HB015": "g",
"HB016": "g",
"HN001": "tg",
"HN002": "g",
"HN003": "tg",
"HN004": "g",
"HN006": "g",
"HN007": "tg",
"HN008": "g",
"HN009": "g",
"HN010": "g",
"HN011": "g",
"HN013": "tg",
"HN014": "tg",
"HN015": "g",
"HN016": "g",
"HN017": "g",
}
def set_cam_condition(sequence, con_dic, frame, size="n", delta = 0):
"""
Parameters
----------
sequence : the sequence to operate on
con_dic : dictionary of conditions ("pos", "rot", "focal", "focus")
frame : the frame to key
size : "n", "p", "g" or "tg" (normal, small, big, very big); applies a delta on the z coordinate
delta : delta on the x coordinate
Returns
-------
None. Sets the camera at the right place for the given frame with the right orientation,
and keys the focal length and focus distance as well.
"""
# set the spatial transform
transform = unreal.Transform()
transform.translation = unreal.Vector(con_dic["pos"][0], con_dic["pos"][1], con_dic["pos"][2])
transform.rotation = unreal.Quat(con_dic["rot"][0], con_dic["rot"][1], con_dic["rot"][2], con_dic["rot"][3])
if size == "p":
transform.translation.z -= 8.234
elif size == "g":
transform.translation.z += 9.468
transform.translation.x += delta*100
focal = con_dic["focal"]
focus = con_dic["focus"]
add_key_to_position_track(sequence, transform, frame)
add_key_to_focal_track(sequence, focal, frame)
add_key_to_focus_track(sequence, focus, frame)
def get_size_from_identity(iden, grand_petit):
"""
Parameters
----------
iden : name of the metahuman
grand_petit : dictionary mapping metahuman names to their size
Returns
-------
size : "n", "p", "g" or "tg"
"""
size = "n" # normal
if iden in grand_petit:
size = grand_petit[iden]
return size # will return either "n" "p" "g" or "tg"
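# Example, grounded in the grand_petit table above: get_size_from_identity("HB001", grand_petit)
# returns "tg", while an identity missing from the table (e.g. "FA001") falls back to "n".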
def chose_nth(liste, n=None):
# randomly choose 1/n-th of the samples from this list (with replacement), sorted; 1 sample if n is None
if n is None:
n = len(liste)
return sorted(rd.choices(liste, k=int(len(liste) / n)))
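# Example (illustrative values): chose_nth([12, 18, 24, 50, 70], 5) draws one focal length,
# while chose_nth(list(range(10)), 2) draws 5 values (duplicates possible, result sorted).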
def full_run(iden, n, test=False, delta=0):
"""
Parameters
----------
iden : the name of one metahuman
n : condition divider (see chose_nth)
test : if True, skip the initial conditions
delta : distance from the origin along the x axis
Returns
-------
"""
# create the camera
camera = SetupCineCameraActor(unreal.Vector(0, 80, 149), unreal.Rotator(0, 0, -90))
# load all the emotions asset path
pose_asset_folder = "/project/"
all_pose_asset_path = get_file_paths(pose_asset_folder)
# initiate the basic conditions list
focals = [12, 18, 24, 50, 70]
focuses = [i for i in range(-100, 100, 10)]
z_angles = [i for i in range(-150, -40, 5)]
y_angles = [i for i in range(-30, 30, 5)]
con_dict = {}
# this stores the initial conditions, the ones that every character will go through
initial_pose = pose_asset_folder+"Joy-01_Satisfaction" # the most neutral expression
initial_cond = []
# but only if not in test
if not test:
initial_cond = [
{"emo": initial_pose, "cam": make_pos_rot_from_angle_and_foc(50, 0, y, z)}
for z in z_angles[5:-5]
for y in y_angles[5:-5]
]
# add all the other conditions
initial_cond += [
{"emo": a, "cam": make_pos_rot_from_angle_and_foc(f, foc, y, z)}
for f in chose_nth(focals, n)
for foc in chose_nth(focuses, n)
for z in chose_nth(z_angles, n)
for y in chose_nth(y_angles, n)
for a in chose_nth(all_pose_asset_path, n)
]
# the dictionary of conditions for our metahuman
con_dict[iden] = initial_cond
# this is important so that add_metahuman_components_to_sequence knows where to look for sequences
u_level_seq_file = f"/project/"
# loop through the dictionary
for k, v in con_dict.items():
path = f"/project/{iden}/BP_{iden}" # define path of the metahuman
size = get_size_from_identity(iden, grand_petit)
MH = LoadBlueprint(path, unreal.Vector(delta*100, 0, 0), size=size) # place it in a row aligned with its friends (see the enumerate loop in generate)
length = len(v) # tells CreateSequence the length of the sequence it should create
sequ_name = f"sequence_{iden}" # name of the sequence to be created
sequence = CreateSequence(sequ_name, camera, length) # finally create the sequence
add_metahuman_components_to_sequence(sequence, MH, u_level_seq_file + sequ_name) # creates an animation track for each driver of the metahuman
print("\n\n############ thank you for your attention ##########")
# loop through the conditions and, using their index, assign them to the nth frame
for cnt, e in enumerate(v):
set_cam_condition(sequence, e["cam"], cnt, size, delta)
control_by_control(e["emo"], sequence, cnt, size, delta)
def start_over():
"""
This function cleans up the project:
it wipes out all existing sequences and frees up the rendering queue.
Useful for running several batches in a row.
Returns
-------
"""
# find and delete sequences
all_sequence_path = get_file_paths("/project/")
for seq_path in all_sequence_path:
reset_scene(seq_path)
# Wipe the queue
subsystem = unreal.get_editor_subsystem(unreal.MoviePipelineQueueSubsystem)
executor = unreal.MoviePipelinePIEExecutor()
queue = subsystem.get_queue()
queue.delete_all_jobs()
def generate(b: int, e: int, n: int, test: bool):
"""
Parameters
----------
b : int, first index of the identities to be generated
e : int, last index of the identities to be generated
n : None or int, the number by which the amount of unique conditions will be divided; if None, generate 1 value per condition
test : if True, skip the initial conditions; used in conjunction with n=None to generate only one picture for testing purposes
Returns Nothing
This function builds the list of all the possible names the metahumans can have,
loops through a portion of them (as defined by b and e) and creates a sequence for each of them.
See full_run for more details
-------
"""
genders = ["F", "H"]
races = ["A", "B", "N"]
id_nb = [f"{i + 1:03}" for i in range(17)]
identities = [g + r + ids for g in genders for r in races for ids in id_nb]
for i, iden in enumerate(identities[b:e]):
full_run(iden, n,test , i)
def render_everything():
u_level_file = "/project/"
all_sequence_path = get_file_paths("/project/")
for i, seq_path in enumerate(all_sequence_path):
MovieQueueRender(u_level_file, seq_path, "/project/", seq_path[-5:])
if __name__ == "__main__":
start = time.time()
start_over()
generate(-5, -4, None, False)
render_everything()
end = time.time()
print(r"\n\n############ pay close attention here!!!!! ##########\n\n")
print(f"temps écoulé : {end - start}s")
print(r"\n\n############ thank you for your attention ###########")
|
import unreal
def run_image_2_text():
"""
Function to run image to text tagging.
"""
subsystem = unreal.get_editor_subsystem(unreal.AITagsEditorSubsystem)
subsystem.clean_cached_assets()
selected_assets = unreal.EditorUtilityLibrary.get_selected_asset_data()
subsystem.add_assets_to_cache(selected_assets)
unreal.log("Python:: Running image to text tagging...")
subsystem.start_image_to_text()
def run_clip_tagging(use_per_category=False, use_threshold=False, threshold=0.2):
"""
Function to run clip tagging.
"""
subsystem = unreal.get_editor_subsystem(unreal.AITagsEditorSubsystem)
subsystem.clean_cached_assets()
selected_assets = unreal.EditorUtilityLibrary.get_selected_asset_data()
subsystem.add_assets_to_cache(selected_assets)
unreal.log("Python:: Running clip tagging...")
subsystem.start_clip_tagging(use_per_category, use_threshold, threshold)
@unreal.uclass()
class PythonAITagsEditorLibrary(unreal.BlueprintFunctionLibrary):
"""
Blueprint function library for AITagging.
Provides static methods to run image to text and clip tagging.
"""
@unreal.ufunction(static=True, meta=dict(Category="AITagging"))
def RunImage2TextFunc():
run_image_2_text()
@unreal.ufunction(static=True, meta=dict(Category="AITagging"), params=[bool, bool, float])
def RunCLIPTaggingFunc(use_per_category:bool=True, use_threshold:bool=False, threshold:float=0.2):
run_clip_tagging(use_per_category, use_threshold, threshold)
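# Usage sketch: with assets selected in the Content Browser, the module-level helpers can be
# called directly from the editor's Python console; the argument values here simply mirror the defaults.
# run_image_2_text()
# run_clip_tagging(use_per_category=True, use_threshold=False, threshold=0.2)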
# NOTICE:
# This class is not working with Blutility because Epic searches for FAssetData instead of a runtime UObject
#
# @unreal.uclass()
# class AITaggingAssetActionTool(unreal.AssetActionUtility):
#
# @unreal.ufunction(static=True,meta={"Category":"AITagging","CallInEditor":True})
# def RunImage2Text():
# unreal.log(f"run_image_2_text")
#
# @unreal.ufunction(meta=dict(Category="AITagging",CallInEditor=True))
# def RunImage2TextV2(self):
# unreal.log(f"run_image_2_text")
#
# @unreal.ufunction(meta={"Category":"AITagging", "CallInEditor":True})
# def RunImage2TextV3(self):
# unreal.log(f"run_image_2_text")
#
# @unreal.ufunction(static=True,meta=dict(CallInEditor=True))
# def RunImage2TextV4():
# unreal.log(f"run_image_2_text")
#
# @unreal.ufunction(meta=dict(Category="AITagging"))
# def run_clip_tagging(self):
# unreal.log(f"run_clip_tagging")
|
import unreal
selected_assets = unreal.EditorUtilityLibrary.get_selected_assets()
loaded_subsystem = unreal.get_editor_subsystem(unreal.EditorAssetSubsystem)
saturation: float = 1.0  # target saturation value; 1.0 is assumed here as a neutral default, adjust as needed
error_message = ""
error_messages = []
asset_count = 0
selected_assets_count = len(selected_assets)
is_success = True
def make_error_message(messages:list[str]) -> str:
return "\n".join(messages)
for asset in selected_assets:
if not isinstance(asset, unreal.Texture2D):
error_message = f"{asset.get_name()}은 텍스처 파일이 아닙니다.\n"
error_messages.append(error_message)
continue
asset.set_editor_property('adjust_saturation', saturation)
path = asset.get_path_name().split('.')[0]
loaded_subsystem.save_asset(path)
asset_count += 1
if len(error_messages) > 0:
failed_asset_count = selected_assets_count - asset_count
failed_asset_message = f"총 {selected_assets_count}개 에셋들 중 {failed_asset_count}개 에셋에 오류가 발생했습니다.\n\n"
error_message = make_error_message(error_messages)
error_message = failed_asset_message + error_message
is_success = False
else:
error_message = f"모든 {asset_count}개 텍스처들에 {saturation:.2f}의 채도를 적용했습니다."
|
# AdvancedSkeleton To ControlRig
# Copyright (C) Animation Studios
# email: [email protected]
# exported using AdvancedSkeleton version:x.xx
import unreal
import re
engineVersion = unreal.SystemLibrary.get_engine_version()
asExportVersion = 'x.xx' # placeholder filled in by the AdvancedSkeleton exporter
asExportTemplate = '4x'
print ('AdvancedSkeleton To ControlRig (Unreal:'+engineVersion+') (AsExport:'+str(asExportVersion)+') (Template:'+asExportTemplate+')')
utilityBase = unreal.GlobalEditorUtilityBase.get_default_object()
selectedAssets = utilityBase.get_selected_assets()
if len(selectedAssets)<1:
raise Exception('Nothing selected, you must select a ControlRig')
selectedAsset = selectedAssets[0]
if selectedAsset.get_class().get_name() != 'ControlRigBlueprint':
raise Exception('Selected object is not a ControlRigBlueprint, you must select a ControlRigBlueprint')
blueprint = selectedAsset
RigGraphDisplaySettings = blueprint.get_editor_property('rig_graph_display_settings')
RigGraphDisplaySettings.set_editor_property('node_run_limit',256)
library = blueprint.get_local_function_library()
library_controller = blueprint.get_controller(library)
hierarchy = blueprint.hierarchy
hierarchy_controller = hierarchy.get_controller()
RigVMController = blueprint.get_controller() #UE5
PreviousArrayInfo = dict()
global ASCtrlNr
global PreviousEndPlug
global PreviousEndPlugInv
global PreviousYInv
global sp
global nonTransformFaceCtrlNr
PreviousYInv = 0
ASCtrlNr = 0
nonTransformFaceCtrlNr = -1
sp = '/project/.RigUnit_'
PreviousEndPlug = 'RigUnit_BeginExecution.ExecuteContext'
PreviousEndPlugInv = 'RigUnit_InverseExecution.ExecuteContext'
def asAddCtrl (name, parent, joint, type, arrayInfo, gizmoName, ws, size, offT, color):
global PreviousEndPlug
global PreviousEndPlugInv
global PreviousYInv
global PreviousArrayInfo
global ctrlBoxSize
global ASCtrlNr
global nonTransformFaceCtrlNr
endPlug = PreviousEndPlug
RigVMGraph = blueprint.get_model()
numNodes = len(RigVMGraph.get_nodes())
y = ASCtrlNr*400
ASCtrlNr=ASCtrlNr+1
ASDrivenNr = int()
RootScale = unreal.Vector(x=1.0, y=1.0, z=1.0)
ParentRigBone = unreal.RigBone()
ParentRigBoneName = parent.replace("FK", "")
hasCon = True
x = joint.split("_")
if len(x)>1:
baseName = x[0]
side = '_'+x[1]
x = ParentRigBoneName.split("_")
if len(x)>1:
ParentRigBoneBaseName = x[0]
RigElementKeys = asGetRigElementKeys ()
for key in RigElementKeys:
if (key.name == 'Root_M'):
hierarchy.get_global_transform(key, initial = True)
if (key.name == ParentRigBoneName):
if (key.type == 1):#Bone
ParentRigBone = hierarchy.find_bone (key)
asAddController (name, parent, joint, type, gizmoName, ws, size, offT, color)
if name=='Main':
return
if name=='RootX_M':
#Item
Item = asAddNode (sp+'Item','Execute',node_name=name+'_Item')
RigVMController.set_node_position (Item, [-500, y])
RigVMController.set_pin_default_value(name+'_Item.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_Item.Item.Name',name)
#CON
CON = asAddNode (sp+'ParentConstraint','Execute',node_name=name+'_CON')
RigVMController.set_node_position (CON, [100, y-90])
RigVMController.set_pin_default_value(name+'_CON.Child.Type','Bone')
RigVMController.set_pin_default_value(name+'_CON.Child.Name',joint)
RigVMController.add_link(name+'_Item.Item', name+'_CON.Parents.0.Item')
RigVMController.add_link(endPlug , name+'_CON.ExecuteContext')
endPlug = name+'_CON.ExecuteContext'
elif (not "inbetweenJoints" in arrayInfo) and (not "inbetweenJoints" in PreviousArrayInfo):
#GT
GT = asAddNode (sp+'GetTransform','Execute',node_name=name+'_GT')
RigVMController.set_node_position (GT, [-500, y])
RigVMController.set_pin_default_value(name+'_GT.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_GT.Item.Name',name)
#ST
ST = asAddNode (sp+'SetTransform','Execute',node_name=name+'_ST')
RigVMController.set_node_position (ST, [100, y])
RigVMController.set_pin_default_value(name+'_ST.Item.Type','Bone')
RigVMController.set_pin_default_value(name+'_ST.Item.Name',joint)
RigVMController.add_link(name+'_GT.Transform' , name+'_ST.Transform')
RigVMController.set_pin_default_value(name+'_ST.bPropagateToChildren','True')
RigVMController.add_link(endPlug , name+'_ST.ExecuteContext')
endPlug = name+'_ST.ExecuteContext'
#twistJoints
if ("twistJoints" in arrayInfo) and (not "twistJoints" in PreviousArrayInfo):
GT2 = asAddNode (sp+'GetTransform','Execute',node_name=name+'_GT2')
RigVMController.set_node_position (GT2, [500, y+50])
RigVMController.set_pin_default_value(name+'_GT2.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_GT2.Item.Name',name)
RigVMController.set_pin_default_value(name+'_GT2.Space','LocalSpace')
TWSW = asAddNode (sp+'MathQuaternionSwingTwist','Execute',node_name=name+'_TWSW')
RigVMController.set_node_position (TWSW, [850, y+90])
RigVMController.add_link(name+'_GT2.Transform.Rotation' , name+'_TWSW.Input')
INV = asAddNode (sp+'MathQuaternionInverse','Execute',node_name=name+'_INV')
RigVMController.set_node_position (INV, [850, y+220])
RigVMController.add_link(name+'_TWSW.Twist' , name+'_INV.Value')
OFF= asAddNode (sp+'OffsetTransformForItem','Execute',node_name=name+'_OFF')
RigVMController.set_node_position (OFF, [1050, y])
RigVMController.set_pin_default_value(name+'_OFF.Item.Type','Bone')
RigVMController.set_pin_default_value(name+'_OFF.Item.Name',joint)
RigVMController.add_link(name+'_INV.Result' , name+'_OFF.OffsetTransform.Rotation')
RigVMController.add_link(endPlug , name+'_OFF.ExecuteContext')
endPlug = name+'_OFF.ExecuteContext'
GT3 = asAddNode (sp+'GetTransform','Execute',node_name=name+'_GT3')
RigVMController.set_node_position (GT3, [1400, y+50])
RigVMController.set_pin_default_value(name+'_GT3.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_GT3.Item.Name',name)
ST2 = asAddNode (sp+'SetTranslation','Execute',node_name=name+'_ST2')
RigVMController.set_node_position (ST2, [1700, y])
RigVMController.set_pin_default_value(name+'_ST2.Item.Type','Bone')
RigVMController.set_pin_default_value(name+'_ST2.Item.Name',joint)
RigVMController.add_link(name+'_GT3.Transform.Translation' , name+'_ST2.Translation')
RigVMController.add_link(endPlug , name+'_ST2.ExecuteContext')
endPlug = name+'_ST2.ExecuteContext'
if "twistJoints" in PreviousArrayInfo:
twistJoints = int (PreviousArrayInfo["twistJoints"])
TwistArray = asAddNode (sp+'CollectionChainArray','Execute',node_name=name+'_TwistArray')
RigVMController.set_node_position (TwistArray , [500, y+50])
RigVMController.set_pin_default_value(name+'_TwistArray.FirstItem', '(Type=Bone,Name='+ParentRigBoneBaseName+'Part1'+side+')')
RigVMController.set_pin_default_value(name+'_TwistArray.LastItem', '(Type=Bone,Name='+ParentRigBoneBaseName+'Part'+str(twistJoints)+side+')')
TwistArrayIterator = RigVMController.add_array_node_from_object_path(unreal.RigVMOpCode.ARRAY_ITERATOR, 'FRigElementKey', '/project/.RigElementKey', unreal.Vector2D(0, 0), name+'_TwistArrayIterator')
RigVMController.set_node_position (TwistArrayIterator , [850, y])
RigVMController.add_link(name+'_TwistArray.Items' , name+'_TwistArrayIterator.Array')
RigVMController.add_link(endPlug , name+'_TwistArrayIterator.ExecuteContext')
endPlug = name+'_TwistArrayIterator.Completed'
GTRE = asAddNode (sp+'GetRelativeTransformForItem','Execute',node_name=name+'_GTRE')
RigVMController.set_node_position (GTRE , [1050, y+50])
RigVMController.set_pin_default_value(name+'_GTRE.Child', '(Type=Bone,Name='+joint+')')
RigVMController.set_pin_default_value(name+'_GTRE.Parent', '(Type=Bone,Name='+ParentRigBoneName+')')
TWSW = asAddNode (sp+'MathQuaternionSwingTwist','Execute',node_name=name+'_TWSW')
RigVMController.set_node_position (TWSW, [1350, y+50])
RigVMController.add_link(name+'_GTRE.RelativeTransform.Rotation' , name+'_TWSW.Input')
TOEU = asAddNode (sp+'MathQuaternionToEuler','Execute',node_name=name+'_TOEU')
RigVMController.set_node_position (TOEU, [1350, y+170])
RigVMController.add_link(name+'_TWSW.Twist' , name+'_TOEU.Value')
FREU = asAddNode (sp+'MathQuaternionFromEuler','Execute',node_name=name+'_FREU')
RigVMController.set_node_position (FREU, [1350, y+270])
RigVMController.add_link(name+'_TOEU.Result' , name+'_FREU.Euler')
QS = asAddNode (sp+'MathQuaternionScale','Execute',node_name=name+'_QS')
RigVMController.set_node_position (QS, [1550, y+50])
RigVMController.set_pin_default_value(name+'_QS.Scale', str(1.0/int (PreviousArrayInfo["twistJoints"])))
RigVMController.add_link(name+'_FREU.Result' , name+'_QS.Value')
SR = asAddNode (sp+'SetRotation','Execute',node_name=name+'_SR')
RigVMController.set_node_position (SR, [1700, y])
RigVMController.add_link(name+'_QS.Value' , name+'_SR.Rotation')
RigVMController.add_link(name+'_TwistArrayIterator.Element', name+'_SR.Item')
RigVMController.set_pin_default_value(name+'_SR.Space','LocalSpace')
RigVMController.set_pin_default_value(name+'_SR.bPropagateToChildren','False')
RigVMController.add_link(name+'_TwistArrayIterator.ExecuteContext' , name+'_SR.ExecuteContext')
#inbetweenJoints
if "inbetweenJoints" in arrayInfo:
inbetweenJoints = int (arrayInfo["inbetweenJoints"])
Chain = asAddNode (sp+'CollectionChainArray','Execute',node_name=name+'_Chain')
RigVMController.set_node_position (Chain, [500, y])
RigVMController.set_pin_default_value(name+'_Chain.FirstItem.Name',joint)
RigVMController.set_pin_default_value(name+'_Chain.LastItem.Name',baseName+'Part'+str(inbetweenJoints)+side)
#GTDistr
GTDistr = asAddNode (sp+'GetTransform','Execute',node_name=name+'_GTDistr')
RigVMController.set_node_position (GTDistr, [850, y])
RigVMController.set_pin_default_value(name+'_GTDistr.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_GTDistr.Item.Name',name)
RigVMController.set_pin_default_value(name+'_GTDistr.Space','LocalSpace')
#Distr
Distr = asAddNode (sp+'DistributeRotationForItemArray','Execute',node_name=name+'_Distr')
RigVMController.set_node_position (Distr, [1200, y])
weight = (1.0 / inbetweenJoints)
RigVMController.set_pin_default_value(name+'_Distr.Weight',str(weight))
RigVMController.add_link(name+'_Chain.Items' , name+'_Distr.Items')
RigVMController.add_array_pin(name+'_Distr.Rotations')
RigVMController.add_link(name+'_GTDistr.Transform.Rotation' , name+'_Distr.Rotations.0.Rotation')
RigVMController.add_link(endPlug , name+'_Distr.ExecuteContext')
endPlug = name+'_Distr.ExecuteContext'
if "inbetweenJoints" in PreviousArrayInfo:
jointKey = asGetKeyFromName (joint)
jointTransform = hierarchy.get_global_transform(jointKey, initial = True)
NullKey = hierarchy_controller.add_null ('Null'+joint,asGetKeyFromName(parent),jointTransform)
#GTNull
GTNull = asAddNode (sp+'GetTransform','Execute',node_name=name+'_GTNull')
RigVMController.set_node_position (GTNull, [1600, y+50])
RigVMController.set_pin_default_value(name+'_GTNull.Item.Type','Bone')
RigVMController.set_pin_default_value(name+'_GTNull.Item.Name',joint)
#STNull
STNull = asAddNode (sp+'SetTransform','Execute',node_name=name+'_STNull')
RigVMController.set_node_position (STNull, [2000, y])
RigVMController.set_pin_default_value(name+'_STNull.Item.Type','Null')
RigVMController.set_pin_default_value(name+'_STNull.Item.Name','Null'+joint)
RigVMController.add_link(name+'_GTNull.Transform' , name+'_STNull.Transform')
RigVMController.set_pin_default_value(name+'_STNull.bPropagateToChildren','True')
RigVMController.add_link(endPlug , name+'_STNull.ExecuteContext')
endPlug = name+'_STNull.ExecuteContext'
hierarchy_controller.set_parent(asGetKeyFromName(name),asGetKeyFromName('Null'+joint))
hierarchy.set_control_offset_transform(asGetKeyFromName(name), unreal.Transform(),initial=True)
hierarchy.set_local_transform(asGetKeyFromName(name), unreal.Transform(),initial=True)
hierarchy.set_local_transform(asGetKeyFromName(name), unreal.Transform(),initial=False)
#GT2
GT2 = asAddNode (sp+'GetTransform','Execute',node_name=name+'_GT2')
RigVMController.set_node_position (GT2, [2400, y])
RigVMController.set_pin_default_value(name+'_GT2.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_GT2.Item.Name',name)
#ST2
ST2 = asAddNode (sp+'SetTransform','Execute',node_name=name+'_ST2')
RigVMController.set_node_position (ST2, [2700, y])
RigVMController.set_pin_default_value(name+'_ST2.Item.Type','Bone')
RigVMController.set_pin_default_value(name+'_ST2.Item.Name',joint)
RigVMController.add_link(name+'_GT2.Transform' , name+'_ST2.Transform')
RigVMController.set_pin_default_value(name+'_ST2.bPropagateToChildren','True')
RigVMController.add_link(endPlug , name+'_ST2.ExecuteContext')
endPlug = name+'_ST2.ExecuteContext'
if "global" in arrayInfo and float(arrayInfo["global"])==10:
Transform = hierarchy.get_global_transform(asGetKeyFromName(name), initial = True)
NullKey = hierarchy_controller.add_null ('Global'+name,asGetKeyFromName(parent),Transform)
hierarchy_controller.set_parent(asGetKeyFromName(name),asGetKeyFromName('Global'+name),maintain_global_transform=True)
hierarchy.set_control_offset_transform(asGetKeyFromName(name), unreal.Transform(), True, True)
hierarchy.set_local_transform(asGetKeyFromName(name), unreal.Transform(),initial=True)
hierarchy.set_local_transform(asGetKeyFromName(name), unreal.Transform(),initial=False)
#resolve where `PreviousEndPlug` is connected to, as that is the start for this Row, which is not always the _ST node
Pin = RigVMGraph.find_pin(PreviousEndPlug)
LinkedPins = Pin.get_linked_target_pins()
PreviousEndPlugConnectedToPin = LinkedPins[0].get_pin_path()
PNPGlobal = asAddNode (sp+'ProjectTransformToNewParent','Execute',node_name=name+'_PNPGlobal')
RigVMController.set_node_position (PNPGlobal, [-1200, y])
RigVMController.set_pin_default_value(name+'_PNPGlobal.Child.Type','Control')
RigVMController.set_pin_default_value(name+'_PNPGlobal.Child.Name',name)
RigVMController.set_pin_default_value(name+'_PNPGlobal.OldParent.Type','Control')
RigVMController.set_pin_default_value(name+'_PNPGlobal.OldParent.Name','Main')
RigVMController.set_pin_default_value(name+'_PNPGlobal.NewParent.Type','Control')
RigVMController.set_pin_default_value(name+'_PNPGlobal.NewParent.Name','Main')
#SRGlobal
SRGlobal = asAddNode (sp+'SetRotation','Execute',node_name=name+'_SRGlobal')
RigVMController.set_node_position (SRGlobal, [-850, y])
RigVMController.set_pin_default_value(name+'_SRGlobal.Item.Type','Null')
RigVMController.set_pin_default_value(name+'_SRGlobal.Item.Name','Global'+name)
RigVMController.set_pin_default_value(name+'_SRGlobal.bPropagateToChildren','True')
RigVMController.add_link(name+'_PNPGlobal.Transform.Rotation' , name+'_SRGlobal.Rotation')
RigVMController.add_link(PreviousEndPlug , name+'_SRGlobal.ExecuteContext')
endPlug = name+'_SRGlobal.ExecuteContext'
#STGlobal
STGlobal = asAddNode (sp+'SetTranslation','Execute',node_name=name+'_STGlobal')
RigVMController.set_node_position (STGlobal, [-850, y+250])
RigVMController.set_pin_default_value(name+'_STGlobal.Item.Type','Null')
RigVMController.set_pin_default_value(name+'_STGlobal.Item.Name','Global'+name)
RigVMController.set_pin_default_value(name+'_STGlobal.Space','LocalSpace')
RigVMController.set_pin_default_value(name+'_STGlobal.bPropagateToChildren','True')
Transform = hierarchy.get_local_transform(NullKey)
RigVMController.set_pin_default_value(name+'_STGlobal.Translation.X',str(Transform.translation.x))
RigVMController.set_pin_default_value(name+'_STGlobal.Translation.Y',str(Transform.translation.y))
RigVMController.set_pin_default_value(name+'_STGlobal.Translation.Z',str(Transform.translation.z))
RigVMController.add_link(name+'_SRGlobal.ExecuteContext' , name+'_STGlobal.ExecuteContext')
endPlug = name+'_STGlobal.ExecuteContext'
RigVMController.add_link(endPlug , PreviousEndPlugConnectedToPin)
endPlug = PreviousEndPlugConnectedToPin
if type=='IK':
asAddController ('Pole'+name, parent, joint, type, 'Sphere_Solid', ws, size/5.0, offT, color)
hierarchy.set_control_offset_transform(asGetKeyFromName('Pole'+name), unreal.Transform(location=[float(arrayInfo["ppX"])*RootScale.x,float(arrayInfo["ppZ"])*RootScale.y,float(arrayInfo["ppY"])*RootScale.z],scale=RootScale), True, True)
#IK(Basic IK)
IK = asAddNode (sp+'TwoBoneIKSimplePerItem','Execute',node_name=name+'_IK')
RigVMController.set_node_position (IK, [600, y-130])
RigVMController.set_pin_default_value(name+'_IK.ItemA.Name',arrayInfo["startJoint"])
RigVMController.set_pin_default_value(name+'_IK.ItemB.Name',arrayInfo["middleJoint"])
RigVMController.set_pin_default_value(name+'_IK.EffectorItem.Name',arrayInfo["endJoint"])
RigVMController.set_pin_default_value(name+'_IK.PrimaryAxis.X',arrayInfo["paX"])
RigVMController.set_pin_default_value(name+'_IK.PrimaryAxis.Y',arrayInfo["paY"])
RigVMController.set_pin_default_value(name+'_IK.PrimaryAxis.Z',arrayInfo["paZ"])
RigVMController.set_pin_default_value(name+'_IK.SecondaryAxis.X',arrayInfo["saX"])
RigVMController.set_pin_default_value(name+'_IK.SecondaryAxis.Y',arrayInfo["saY"])
RigVMController.set_pin_default_value(name+'_IK.SecondaryAxis.Z',arrayInfo["saZ"])
RigVMController.set_pin_default_value(name+'_IK.PoleVectorKind','Location')
RigVMController.set_pin_default_value(name+'_IK.PoleVectorSpace.Type','Control')
RigVMController.set_pin_default_value(name+'_IK.PoleVectorSpace.Name','Pole'+name)
RigVMController.set_pin_default_value(name+'_IK.bPropagateToChildren','True')
RigVMController.add_link(name+'_GT.Transform' , name+'_IK.Effector')
RigVMController.add_link(endPlug , name+'_IK.ExecuteContext')
endPlug = name+'_IK.ExecuteContext'
#GTPole
GTPole = asAddNode (sp+'GetTransform','Execute',node_name=name+'_GTPole')
RigVMController.set_node_position (GTPole, [1000, y])
RigVMController.set_pin_default_value(name+'_GTPole.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_GTPole.Item.Name','Pole'+name)
#GTMidJoint
GTMidJoint = asAddNode (sp+'GetTransform','Execute',node_name=name+'_GTMidJoint')
RigVMController.set_node_position (GTMidJoint, [1000, y+200])
RigVMController.set_pin_default_value(name+'_GTMidJoint.Item.Type','Bone')
RigVMController.set_pin_default_value(name+'_GTMidJoint.Item.Name',arrayInfo["middleJoint"])
PoleLine = asAddNode (sp+'DebugLineItemSpace','Execute',node_name=name+'_PoleLine')
RigVMController.set_node_position (PoleLine, [1350, y])
RigVMController.add_link(name+'_GTPole.Transform.Translation' , name+'_PoleLine.A')
RigVMController.add_link(name+'_GTMidJoint.Transform.Translation' , name+'_PoleLine.B')
RigVMController.add_link(endPlug , name+'_PoleLine.ExecuteContext')
endPlug = name+'_PoleLine.ExecuteContext'
control_value = hierarchy.make_control_value_from_euler_transform(unreal.EulerTransform(scale=[1, 1, 1]))
control_settings = unreal.RigControlSettings()
control_settings.shape_visible = False
hierarchy_controller.add_control (name+'LS','', control_settings,control_value)
hierarchy_controller.set_parent(asGetKeyFromName(name+'LS'),asGetKeyFromName(name),maintain_global_transform=False)
RigVMController.set_pin_default_value(name+'_GT.Item.Name',name+'LS')
for key in RigElementKeys:
if (key.name == arrayInfo["endJoint"]):
endJointKey = key
EndJointTransform = hierarchy.get_global_transform(endJointKey, initial = False)
Rotation = EndJointTransform.rotation.rotator()
Transform = unreal.Transform(location=[0,0,0],rotation=Rotation,scale=[1,1,1])
hierarchy.set_control_offset_transform(asGetKeyFromName(name+'LS'), Transform, True, True)
#Backwards solve nodes (IK)
PNPinvIK = asAddNode (sp+'ProjectTransformToNewParent','Execute',node_name=name+'_PNPinvIK')
RigVMController.set_node_position (PNPinvIK, [-2900, y])
RigVMController.set_pin_default_value(name+'_PNPinvIK.Child.Type','Control')
RigVMController.set_pin_default_value(name+'_PNPinvIK.Child.Name',name)
RigVMController.set_pin_default_value(name+'_PNPinvIK.OldParent.Type','Control')
RigVMController.set_pin_default_value(name+'_PNPinvIK.OldParent.Name',name+'LS')
RigVMController.set_pin_default_value(name+'_PNPinvIK.NewParent.Type','Bone')
RigVMController.set_pin_default_value(name+'_PNPinvIK.NewParent.Name',joint)
#STinvIK
STinvIK = asAddNode (sp+'SetTransform','Execute',node_name=name+'_STinvIK')
RigVMController.set_node_position (STinvIK, [-2500, y])
RigVMController.set_pin_default_value(name+'_STinvIK.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_STinvIK.Item.Name',name)
RigVMController.set_pin_default_value(name+'_STinvIK.bPropagateToChildren','True')
RigVMController.add_link(name+'_PNPinvIK.Transform' , name+'_STinvIK.Transform')
RigVMController.add_link(PreviousEndPlugInv , name+'_STinvIK.ExecuteContext')
#GTinvPole
GTinvPole = asAddNode (sp+'GetTransform','Execute',node_name=name+'_GTinvPole')
RigVMController.set_node_position (GTinvPole, [-1700, y])
RigVMController.set_pin_default_value(name+'_GTinvPole.Item.Type','Bone')
RigVMController.set_pin_default_value(name+'_GTinvPole.Item.Name',arrayInfo["middleJoint"])
#STinvPole
STinvPole = asAddNode (sp+'SetTransform','Execute',node_name=name+'_STinvPole')
RigVMController.set_node_position (STinvPole, [-1300, y])
RigVMController.set_pin_default_value(name+'_STinvPole.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_STinvPole.Item.Name','Pole'+name)
RigVMController.set_pin_default_value(name+'_STinvPole.bPropagateToChildren','True')
RigVMController.add_link(name+'_GTinvPole.Transform' , name+'_STinvPole.Transform')
RigVMController.add_link(name+'_STinvIK.ExecuteContext' , name+'_STinvPole.ExecuteContext')
endPlugInv = name+'_STinvPole.ExecuteContext'
PreviousEndPlugInv = endPlugInv
PreviousYInv = y
if "twistJoints" in arrayInfo or "inbetweenJoints" in arrayInfo:
PreviousArrayInfo = arrayInfo
else:
PreviousArrayInfo.clear()
#DrivingSystem
if type=='DrivingSystem' or type=='ctrlBox':
if type=='DrivingSystem':
RigVMController.set_pin_default_value(name+'_GT.Item.Type','Bone')
RigVMController.set_pin_default_value(name+'_GT.Item.Name',joint)
RigVMController.set_pin_default_value(name+'_ST.Item.Type','Control')
RigVMController.set_pin_default_value(name+'_ST.Item.Name',name)
if type=='ctrlBox' and name!='ctrlBox':
parentTransform = hierarchy.get_global_control_offset_transform(asGetKeyFromName(parent))
Transform = unreal.Transform(location=[offT[0]/ctrlBoxSize,offT[2]/ctrlBoxSize,offT[1]/ctrlBoxSize])
Transform.rotation = [0,0,0]
hierarchy.set_control_offset_transform(asGetKeyFromName(name), Transform, True, True)
if name=='ctrlBox':
#add a ws oriented ctrlBoxOffset
NullKey = hierarchy_controller.add_null ('ctrlBoxOffset',asGetKeyFromName(parent),unreal.Transform(),transform_in_global=True)
Transform = hierarchy.get_local_transform(NullKey)
Transform.translation = [0,0,0]
hierarchy.set_local_transform(NullKey, Transform,initial=True)
hierarchy.set_local_transform(NullKey, Transform,initial=False)
hierarchy_controller.set_parent(asGetKeyFromName(name),NullKey,maintain_global_transform=False)
ctrlBoxSize = float (arrayInfo["ctrlBoxSize"])
Scale = [0.6,1.5,1.0]
ctrlBoxScale = [ctrlBoxSize,ctrlBoxSize,ctrlBoxSize]
parentTransform = hierarchy.get_global_control_offset_transform(asGetKeyFromName(parent))
Transform2 = hierarchy.get_global_control_offset_transform(asGetKeyFromName(name)).make_relative(parentTransform)
Transform2.translation = [offT[0],offT[2],offT[1]]
Transform2.scale3d = [ctrlBoxSize,ctrlBoxSize,ctrlBoxSize]
hierarchy.set_control_offset_transform(asGetKeyFromName(name), Transform2, True, True)
Transform = unreal.Transform(location=[0,0,-1],rotation=[0,0,90],scale=Scale)
hierarchy.set_control_shape_transform(asGetKeyFromName(name), Transform, True, True)
return
nonTransformFaceCtrl = False
if type=='ctrlBox':
RigVMController.remove_node(RigVMGraph.find_node_by_name(name+'_GT'))
RigVMController.remove_node(RigVMGraph.find_node_by_name(name+'_ST'))
Transform = unreal.Transform(scale=[0.05,0.05,0.05])
hierarchy.set_control_shape_transform(asGetKeyFromName(name), Transform, True, True)
maxXform = unreal.Vector2D(1,1)
minXform = unreal.Vector2D(-1,-1)
if name=='ctrlMouth_M':
maxXform = [1,0]
if re.search("^ctrlCheek_", name) or re.search("^ctrlNose_", name):
minXform = [-1,0]
RigElementKeys = asGetRigElementKeys ()
for key in RigElementKeys:
if (key.name == name):
RigControlKey = key
hierarchy.set_control_value(asGetKeyFromName(name),unreal.RigHierarchy.make_control_value_from_vector2d(maxXform), unreal.RigControlValueType.MAXIMUM)
hierarchy.set_control_value(asGetKeyFromName(name),unreal.RigHierarchy.make_control_value_from_vector2d(minXform), unreal.RigControlValueType.MINIMUM)
endPlug = PreviousEndPlug
control_settings = unreal.RigControlSettings()
control_value = hierarchy.make_control_value_from_euler_transform(unreal.EulerTransform(scale=[1, 1, 1]))
AttrGrpkey = hierarchy_controller.add_control (name+"_Attributes",'',control_settings,control_value)
if type=='ctrlBox':
hierarchy_controller.set_parent(asGetKeyFromName(name+"_Attributes"),asGetKeyFromName(parent),maintain_global_transform=False)
else:
hierarchy_controller.set_parent(asGetKeyFromName(name+"_Attributes"),asGetKeyFromName(name),maintain_global_transform=False)
hierarchy.set_control_shape_transform(asGetKeyFromName(name+"_Attributes"), unreal.Transform(location=[0,0,0],rotation=[0,0,0],scale=[0,0,0]), True)
Transform = hierarchy.get_global_control_offset_transform(asGetKeyFromName(name), initial = True).copy()
parentTransform = hierarchy.get_global_transform(asGetKeyFromName(parent), initial = True)
Transform = Transform.make_relative(parentTransform)
if type=='ctrlBox':
Transform.translation.x = -5.0
Transform.translation.z += 0.8
if re.search("_L", name) or re.search("_M", name):
Transform.translation.x = 4.0
if nonTransformFaceCtrl:
Transform.translation.z = -5.5-(nonTransformFaceCtrlNr*2) # stack rows of sliders downwards
else:
numAttrs = 0
for Attr in arrayInfo.keys():
if not re.search("-set", Attr):
numAttrs=numAttrs+1
Transform = unreal.Transform(location=[0,0,0],rotation=[0,0,0],scale=[1,1,1])
Transform.translation.x = offT[0]
Transform.translation.y = numAttrs*0.5*(size/4.0)*-0.5
Transform.translation.z = size/8.0
if re.search("_L", name):
Transform.translation.z *= -1
Transform.scale3d=[size/4.0,size/4.0,size/4.0]
hierarchy.set_control_offset_transform(asGetKeyFromName(name+"_Attributes"), Transform, True, True)
Attrs = arrayInfo.keys()
attrNr = 0
for Attr in Attrs:
if re.search("-set", Attr):
if re.search("-setLimits", Attr):
DictDrivens = arrayInfo.get(Attr)
min = float(list(DictDrivens.keys())[0])
max = float(list(DictDrivens.values())[0])
RigElementKeys = asGetRigElementKeys ()
for key in RigElementKeys:
if key.name == name+"_"+Attr.replace("-setLimits", ""):
hierarchy.set_control_value(key, unreal.RigHierarchy.make_control_value_from_float(min), unreal.RigControlValueType.MINIMUM)
hierarchy.set_control_value(key, unreal.RigHierarchy.make_control_value_from_float(max), unreal.RigControlValueType.MAXIMUM)
continue
transformAttrDriver = True
if not re.search("translate", Attr) or re.search("rotate", Attr) or re.search("scale", Attr):
control_settings = unreal.RigControlSettings()
control_settings.control_type = unreal.RigControlType.FLOAT
control_settings.shape_color = unreal.LinearColor(r=1.0, g=0.0, b=0.0, a=1.0)
control_settings.limit_enabled = [unreal.RigControlLimitEnabled(True, True)]
if nonTransformFaceCtrl or re.search("_M", name):
control_settings.primary_axis = unreal.RigControlAxis.Z
else:
control_settings.primary_axis = unreal.RigControlAxis.X
key = hierarchy_controller.add_control (name+"_"+Attr,asGetKeyFromName(name+"_Attributes"),control_settings,control_value)
hierarchy.set_control_value(key,unreal.RigHierarchy.make_control_value_from_float(1), unreal.RigControlValueType.MAXIMUM)
hierarchy.set_control_shape_transform(asGetKeyFromName(name+"_"+Attr), unreal.Transform(location=[0,0,0],rotation=[0,0,0],scale=[0.035,0.035,0.035]), True, True)
Transform = unreal.Transform(location=[0,0,0],rotation=[0,0,0],scale=[1,1,1])
Transform.translation = [0,attrNr*0.5,0]
if type=='ctrlBox':
Transform.translation = [0,0,attrNr*-0.5]
if nonTransformFaceCtrl or re.search("_M", name):
Transform.translation = [attrNr,0,0]
attrNr = attrNr+1
hierarchy.set_control_offset_transform(asGetKeyFromName(name+"_"+Attr), Transform, True, True)
transformAttrDriver = False
DictDrivens = arrayInfo.get(Attr)
KeysDrivens = DictDrivens.keys()
for Driven in KeysDrivens:
Value = float(DictDrivens.get(Driven))
x2 = ASDrivenNr*1200
dNr = str(ASDrivenNr)
ASDrivenNr = ASDrivenNr+1
x = Driven.split(".")
obj = x[0]
attr = '_'+x[1]
axis = attr[-1]
valueMult = 1
if re.search("rotate", attr):
if axis == 'X' or axis=='Z':
valueMult = -1
if re.search("translate", attr):
if axis=='Y':
valueMult = -1
multiplier = Value*valueMult
asFaceBSDriven = False
if re.search("asFaceBS[.]", Driven):#asFaceBS
asFaceBSDriven = True
if not (asFaceBSDriven):
RigElementKeys = asGetRigElementKeys ()
for key in RigElementKeys:
if key.name == obj:
objKey = key
if not asObjExists ('Offset'+obj):
objParentKey = hierarchy.get_parents(objKey)[0]
OffsetKey = hierarchy_controller.add_null ('Offset'+obj,objKey,unreal.Transform(),transform_in_global=False)
hierarchy_controller.set_parent(OffsetKey,objParentKey)
hierarchy_controller.set_parent(objKey,OffsetKey)
parentTo = 'Offset'+obj
for x in range(1,9):
sdk = 'SDK'+obj+"_"+str(x)
if not asObjExists (sdk):
break
if x>1:
parentTo = 'SDK'+obj+"_"+str(x-1)
SDKKey = hierarchy_controller.add_null (sdk,asGetKeyFromName(parentTo),unreal.Transform(),transform_in_global=False)
hierarchy_controller.set_parent(objKey,SDKKey)
#GTDriver
if transformAttrDriver:
GTDriver = asAddNode (sp+'GetControlVector2D','Execute',node_name=name+"_"+obj+"_"+attr+dNr+'_GTDriver')
RigVMController.set_pin_default_value(name+"_"+obj+"_"+attr+dNr+'_GTDriver.Control',name)
gtPlug = name+"_"+obj+"_"+attr+dNr+'_GTDriver.Vector.'+Attr[-1]#Attr[-1] is DriverAxis
else:
GTDriver = asAddNode (sp+'GetControlFloat','Execute',node_name=name+"_"+obj+"_"+attr+dNr+'_GTDriver')
RigVMController.set_pin_default_value(name+"_"+obj+"_"+attr+dNr+'_GTDriver.Control',name+"_"+Attr)
gtPlug = name+"_"+obj+"_"+attr+dNr+'_GTDriver.FloatValue'
RigVMController.set_node_position (GTDriver, [500+x2, y])
#MFM
MFM = asAddNode (sp+'MathFloatMul','Execute',node_name=name+"_"+obj+"_"+attr+dNr+'_MFM')
RigVMController.set_node_position (MFM, [900+x2, y])
RigVMController.add_link(gtPlug , name+"_"+obj+"_"+attr+dNr+'_MFM.A')
RigVMController.set_pin_default_value(name+"_"+obj+"_"+attr+dNr+'_MFM.B',str(multiplier))
if asFaceBSDriven:
#Clamp
Clamp = asAddNode (sp+'MathFloatClamp','Execute',node_name=name+"_"+obj+"_"+attr+dNr+'_Clamp')
RigVMController.set_node_position (Clamp, [900+x2, y+100])
RigVMController.set_pin_default_value(name+"_"+obj+"_"+attr+dNr+'_Clamp.Maximum','5.0')
RigVMController.add_link(name+"_"+obj+"_"+attr+dNr+'_MFM.Result' , name+"_"+obj+"_"+attr+dNr+'_Clamp.Value')
#STDriven
STDriven = asAddNode (sp+'SetCurveValue','Execute',node_name=name+"_"+Attr+"_"+attr+'_STDriven')
RigVMController.set_node_position (STDriven, [1100+x2, y])
RigVMController.set_pin_default_value(name+"_"+Attr+"_"+attr+'_STDriven.Curve',Driven.split(".")[1])
RigVMController.add_link(name+"_"+obj+"_"+attr+dNr+'_Clamp.Result' , name+"_"+Attr+"_"+attr+'_STDriven.Value')
RigVMController.add_link(endPlug , name+"_"+Attr+"_"+attr+'_STDriven.ExecuteContext')
endPlug =name+"_"+Attr+"_"+attr+'_STDriven.ExecuteContext'
else:
#STDriven
STDriven = asAddNode (sp+'SetTransform','Execute',node_name=name+"_"+obj+"_"+attr+dNr+'_STDriven')
RigVMController.set_node_position (STDriven, [1300+x2, y])
RigVMController.set_pin_default_value(name+"_"+obj+"_"+attr+dNr+'_STDriven.Item.Type','Null')
RigVMController.set_pin_default_value(name+"_"+obj+"_"+attr+dNr+'_STDriven.Item.Name',sdk)
RigVMController.set_pin_default_value(name+"_"+obj+"_"+attr+dNr+'_STDriven.Space','LocalSpace')
RigVMController.set_pin_default_value(name+"_"+obj+"_"+attr+dNr+'_STDriven.bPropagateToChildren','True')
RigVMController.add_link(endPlug , name+"_"+obj+"_"+attr+dNr+'_STDriven.ExecuteContext')
endPlug = name+"_"+obj+"_"+attr+dNr+'_STDriven.ExecuteContext'
#TFSRT
TFSRT = asAddNode (sp+'MathTransformFromSRT','Execute',node_name=name+"_"+obj+"_"+attr+dNr+'_TFSRT')
RigVMController.set_node_position (TFSRT, [900+x2, y+150])
if re.search("translate", attr):
RigVMController.add_link(name+"_"+obj+"_"+attr+dNr+'_MFM.Result' , name+"_"+obj+"_"+attr+dNr+'_TFSRT.Location.'+axis)
if re.search("rotate", attr):
RigVMController.add_link(name+"_"+obj+"_"+attr+dNr+'_MFM.Result' , name+"_"+obj+"_"+attr+dNr+'_TFSRT.Rotation.'+axis)
if re.search("scale", attr):
#scale just add 1, not accurate but simplified workaround
MFA = asAddNode (sp+'MathFloatAdd','Execute',node_name=name+"_"+obj+"_"+attr+dNr+'_MFA')
RigVMController.set_node_position (MFA, [1100+x2, y])
RigVMController.add_link(name+"_"+obj+"_"+attr+dNr+'_MFM.Result' , name+"_"+obj+"_"+attr+dNr+'_MFA.A')
RigVMController.set_pin_default_value(name+"_"+obj+"_"+attr+dNr+'_MFA.B','1')
RigVMController.add_link(name+"_"+obj+"_"+attr+dNr+'_MFA.Result' , name+"_"+obj+"_"+attr+dNr+'_TFSRT.Scale.'+axis)
RigVMController.add_link(name+"_"+obj+"_"+attr+dNr+'_TFSRT.Transform', name+"_"+obj+"_"+attr+dNr+'_STDriven.Transform')
#face
if re.search("Teeth_M", name):
RigControl.set_editor_property('gizmo_enabled',False)
hierarchy_controller.set_control (RigControl)
if name=="Jaw_M":
RigControl.set_editor_property('gizmo_name','HalfCircle_Thick')
RigControl.set_editor_property('gizmo_name','HalfCircle_Thick')
Transform = RigControl.get_editor_property('gizmo_transform')
Transform.rotation=[0,0,180]
RigControl.set_editor_property('gizmo_transform', Transform)
hierarchy_controller.set_control (RigControl)
PreviousEndPlug = endPlug
def asObjExists (obj):
RigElementKeys = asGetRigElementKeys ()
LocObject = None
for key in RigElementKeys:
if key.name == obj:
return True
return False
def asAddController (name, parent, joint, type, gizmoName, ws, size, offT, color):
parentKey = asGetKeyFromName(parent)
if gizmoName=='Gizmo':
gizmoName='Default'
control_settings = unreal.RigControlSettings()
if type=='ctrlBox':
control_settings.control_type = unreal.RigControlType.VECTOR2D
control_settings.primary_axis = unreal.RigControlAxis.Y
control_settings.limit_enabled = [unreal.RigControlLimitEnabled(True, True), unreal.RigControlLimitEnabled(True, True)]
else:
control_settings.control_type = unreal.RigControlType.EULER_TRANSFORM
if name=='ctrlEmotions_M' or name=='ctrlPhonemes_M' or name=='ctrlARKit_M' or name=='ctrlBoxRobloxHead_M':
control_settings.shape_visible = False
control_settings.shape_name = gizmoName
control_settings.shape_color = unreal.LinearColor(color[0], color[1], color[2], 1.0)
control_value = hierarchy.make_control_value_from_euler_transform(unreal.EulerTransform(scale=[1, 1, 1]))
key = hierarchy_controller.add_control (name,'', control_settings,control_value)
jointKey = asGetKeyFromName (joint)
jointTransform = hierarchy.get_global_transform(jointKey, initial = True)
if ws == 1:
jointTransform.rotation = [0,0,0]
parentTransform = unreal.Transform()
if parent!='':
parentTransform = hierarchy.get_global_transform(asGetKeyFromName(parent), initial = True)
OffsetTransform = jointTransform.make_relative(parentTransform)
if parent!='':
hierarchy_controller.set_parent(asGetKeyFromName(name),asGetKeyFromName(parent),maintain_global_transform=False)
hierarchy.set_control_offset_transform(key, OffsetTransform, True, True)
GizmoLocation = [offT[0],offT[2],offT[1]]
GizmoRotation = [0,0,0]
if ws == 0:
GizmoRotation = [90,0,0]
GizmoLocation = [offT[0],offT[1]*-1,offT[2]]
if type=="DrivingSystem":
GizmoRotation = [0,0,0]
if type=="ctrlBox":
GizmoRotation = [0,0,90]
if re.search("^Eye_.*", name) or re.search("^Iris_.*", name) or re.search("^Pupil_.*", name):
GizmoRotation = [0,90,0]
hierarchy.set_control_visibility(key,False)
x = re.search("^Pole.*", name)
if x:
GizmoLocation = [0,0,0]
s = 0.1*size
Scale = [s,s,s]
if type=='FK' and ws == 0:
Scale[2]*=2.5
hierarchy.set_control_shape_transform(key, unreal.Transform(location=GizmoLocation,rotation=GizmoRotation,scale=Scale), True)
def asAddNode (script_struct_path, method_name, node_name):
#RigVMController.
#add_struct_node_from_struct_path UE4
#add_unit_node_from_struct_path UE5
try:
node = RigVMController.add_struct_node_from_struct_path(script_struct_path,method_name,node_name=node_name) #UE4
except:
node = RigVMController.add_unit_node_from_struct_path(script_struct_path,method_name,node_name=node_name) #UE5
return node
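# Hedged alternative sketch (illustrative only, not called by this script): the same
# UE4/UE5 split could be made with an explicit engine-version check instead of try/except.
# Assumes the same module-level RigVMController used above.
def asAddNodeByVersion (script_struct_path, method_name, node_name):
    # get_engine_version() returns a string such as "5.1.0-..." ; the leading digit picks the call
    if unreal.SystemLibrary.get_engine_version().startswith('5'):
        return RigVMController.add_unit_node_from_struct_path(script_struct_path, method_name, node_name=node_name) #UE5
    return RigVMController.add_struct_node_from_struct_path(script_struct_path, method_name, node_name=node_name) #UE4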
def asGetRigElementKeys ():
try:
RigElementKeys = hierarchy_controller.get_elements() #UE4
except:
RigElementKeys = hierarchy.get_all_keys() #UE5
return RigElementKeys
def asGetKeyFromName (name):
all_keys = hierarchy.get_all_keys(traverse = True)
for key in all_keys:
if key.name == name:
return key
return ''
def asBackwardsSolveNodes ():
global PreviousYInv
global PreviousEndPlugInv
PNP = asAddNode (sp+'ProjectTransformToNewParent','Execute',node_name='Root_M_PNP')
RigVMController.set_node_position (PNP, [-1500, PreviousYInv+400-90])
RigVMController.set_pin_default_value('Root_M_PNP.Child.Type','Control')
RigVMController.set_pin_default_value('Root_M_PNP.Child.Name','RootX_M')
RigVMController.set_pin_default_value('Root_M_PNP.OldParent.Type','Bone')
RigVMController.set_pin_default_value('Root_M_PNP.OldParent.Name','Root_M')
RigVMController.set_pin_default_value('Root_M_PNP.NewParent.Type','Bone')
RigVMController.set_pin_default_value('Root_M_PNP.NewParent.Name','Root_M')
STInv = asAddNode (sp+'SetTransform','Execute',node_name='Root_M_STInv')
RigVMController.set_node_position (STInv, [-1200, PreviousYInv+400-90])
RigVMController.set_pin_default_value('Root_M_STInv.Item.Type','Control')
RigVMController.set_pin_default_value('Root_M_STInv.Item.Name','RootX_M')
RigVMController.add_link('Root_M_PNP.Transform' , 'Root_M_STInv.Transform')
RigVMController.add_link(PreviousEndPlugInv , 'Root_M_STInv.ExecuteContext')
CCinv = asAddNode (sp+'CollectionChildren','Execute',node_name='CCinv')
RigVMController.set_node_position (CCinv, [-2600, PreviousYInv+1000])
RigVMController.set_pin_default_value('CCinv.Parent.Type','Bone')
RigVMController.set_pin_default_value('CCinv.Parent.Name','Root_M')
RigVMController.set_pin_default_value('CCinv.bRecursive','True')
RigVMController.set_pin_default_value('CCinv.TypeToSearch','Bone')
CLinv = asAddNode (sp+'CollectionLoop','Execute',node_name='CLinv')
RigVMController.set_node_position (CLinv, [-2150, PreviousYInv+1000])
RigVMController.add_link('Root_M_STInv.ExecuteContext' , 'CLinv.ExecuteContext')
RigVMController.add_link('CCinv.Collection' , 'CLinv.Collection')
PreviousEndPlugInv = 'CLinv.Completed'
NCinv = asAddNode (sp+'NameConcat','Execute',node_name='NCinv')
RigVMController.set_node_position (NCinv, [-1900, PreviousYInv+900])
RigVMController.set_pin_default_value('NCinv.A','FK')
RigVMController.add_link('CLinv.Item.Name' , 'NCinv.B')
GTinv = asAddNode (sp+'GetTransform','Execute',node_name='GTinv')
RigVMController.set_node_position (GTinv, [-1900, PreviousYInv+1000])
RigVMController.add_link('CLinv.Item.Name' , 'GTinv.Item.Name')
IEinv = asAddNode (sp+'ItemExists','Execute',node_name='IEinv')
RigVMController.set_node_position (IEinv, [-1700, PreviousYInv+700])
RigVMController.set_pin_default_value('IEinv.Item.Type','Control')
RigVMController.add_link('NCinv.Result' , 'IEinv.Item.Name')
BRinv = RigVMController.add_branch_node(node_name='BRinv')
RigVMController.set_node_position (BRinv, [-1650, PreviousYInv+850])
RigVMController.add_link('IEinv.Exists' , 'BRinv.Condition')
RigVMController.add_link('CLinv.ExecuteContext' , 'BRinv.ExecuteContext')
STinv = asAddNode (sp+'SetTransform','Execute',node_name='STinv')
RigVMController.set_node_position (STinv, [-1500, PreviousYInv+1000])
RigVMController.set_pin_default_value('STinv.Item.Type','Control')
RigVMController.add_link('NCinv.Result' , 'STinv.Item.Name')
RigVMController.add_link('GTinv.Transform' , 'STinv.Transform')
RigVMController.add_link('BRinv.True' , 'STinv.ExecuteContext')
def main ():
global PreviousEndPlugInv
RigElementKeys = asGetRigElementKeys ()
RigVMGraph = blueprint.get_model()
#Clear out existing rig-setup
nodes = RigVMGraph.get_nodes()
for node in nodes:
RigVMController.remove_node(node)
#Clear out existing controllers
for key in RigElementKeys:
if key.name == 'MotionSystem':
hierarchy_controller.remove_element(key)
elif not (key.type==1 or key.type==8): #BONE
try:
hierarchy_controller.remove_element(key)
except:
pass
#UE5 does not delete children automatically, so try to clean up remaining controls/nulls/bones explicitly
controls = hierarchy.get_controls()
for key in controls:
hierarchy_controller.remove_element(key)
nulls = hierarchy.get_nulls()
for key in nulls:
hierarchy_controller.remove_element(key)
bones = hierarchy.get_bones()
for key in bones:
x = re.search("UnTwist", str(key.name))
if x:
hierarchy_controller.remove_element(key)
BeginExecutionNode = asAddNode (sp+'BeginExecution','Execute',node_name='RigUnit_BeginExecution')
RigVMController.set_node_position (BeginExecutionNode, [-300, -100])
InverseExecutionNode = asAddNode (sp+'InverseExecution','Execute',node_name='RigUnit_InverseExecution')
RigVMController.set_node_position (InverseExecutionNode, [-1900, -100])
MotionSystemKey = hierarchy_controller.add_null ('MotionSystem','',unreal.Transform())
MainSystemKey = hierarchy_controller.add_null ('MainSystem',MotionSystemKey,unreal.Transform())
DrivingSystemKey = hierarchy_controller.add_null ('DrivingSystem',MotionSystemKey ,unreal.Transform())
#//-- ASControllers Starts Here --//
#//-- ASControllers Ends Here --//
asBackwardsSolveNodes()
print ("ControlRig created")
if __name__ == "__main__":
main()
|
'''
from Lib import __lib_topaz__ as topaz
import unreal
temp : unreal.AnimSequence = topaz.get_selected_asset()
print(temp)
ctrl : unreal.AnimationDataController = temp.controller # type cast
print(ctrl)
f_t0 = unreal.FrameNumber(3951) # 3951
f_t1 = unreal.FrameNumber(4251) # 4251; the final frame is 4751
frames = unreal.FrameNumber.__sub__(f_t1, f_t0) #300
ctrl.resize_number_of_frames(frames,f_t0, f_t1)
print(frames)
'''
# The API does not behave correctly... it ignores t0/t1 and always trims from frame 0
import unreal
selected_assets = unreal.EditorUtilityLibrary.get_selected_assets()
start_frame = 3951
end_frame = 4251
t0 = unreal.FrameNumber(start_frame)
t1 = unreal.FrameNumber(end_frame)
frames = unreal.FrameNumber.__sub__(t1, t0)
if selected_assets:
asset = selected_assets[-1] # take the last selected asset
if isinstance(asset, unreal.AnimSequence):
control_anim_asset : unreal.AnimationDataController = asset.controller
print(control_anim_asset)
print(asset)
control_anim_asset.resize_number_of_frames(frames,t0,t1)
else:
print("The selected asset is not an AnimSequence.")
|
# py "C:/project/.EngTools/project/.py"
# https://dev.epicgames.com/project/-us/unreal-engine/unreal-engine-material-expressions-reference
import unreal
import sys
import inspect
import types
from pathlib import Path
from importlib import reload
sys.path.append(str(Path(__file__).parent.parent.parent.parent.parent.resolve()))
reloads = []
for k, v in sys.modules.items():
if k.startswith("pamux_unreal_tools") and "generated" not in k:
reloads.append(v)
for module in reloads:
reload(module)
# from pamux_unreal_tools.tools.code_generators.py_code_generator import *
from pamux_unreal_tools.tools.code_generators.base.code_generator_base import *
from pamux_unreal_tools.tools.code_generators.cpp_code_generator import *
from pamux_unreal_tools.tools.code_generators.material_expression_wrapper_generator.ctor_params import *
from pamux_unreal_tools.tools.code_generators.material_expression_wrapper_generator.globals import *
from pamux_unreal_tools.tools.code_generators.material_expression_wrapper_generator.values import *
from pamux_unreal_tools.tools.code_generators.material_expression_wrapper_generator.unreal_dump import *
from pamux_unreal_tools.tools.code_generators.material_expression_wrapper_generator.inputs import *
from pamux_unreal_tools.tools.code_generators.material_expression_wrapper_generator.outputs import *
from pamux_unreal_tools.tools.code_generators.material_expression_wrapper_generator.properties import *
from pamux_unreal_tools.tools.code_generators.material_expression_wrapper_generator import custom_base_classes
codeGen = CppCodeGenerator()
codeGen.declaration_filepath = generated_h_out_filepath
codeGen.definition_filepath = generated_cpp_out_filepath
def generate_pamux_wrapper_class(codeGen: CodeGeneratorBase, c: unreal.MaterialExpression):
pamux_wrapper_class_name = c.__name__[len("MaterialExpression"):]
inputs = setup_input_sockets(pamux_wrapper_class_name)
outputs = setup_output_sockets(pamux_wrapper_class_name)
properties = setup_properties(pamux_wrapper_class_name, c.__doc__)
ctor_params = setup_ctor_params(pamux_wrapper_class_name)
base_class_candidate_name = f"{pamux_wrapper_class_name}Base"
if hasattr(custom_base_classes, base_class_candidate_name):
base_class_name = base_class_candidate_name
else:
base_class_name = "MaterialExpressionImpl"
codeGen.append_blank_line()
codeGen.begin_class(pamux_wrapper_class_name, base_class_name)
# codeGen.append_blank_line()
codeGen.begin_ctor(pamux_wrapper_class_name, ctor_params)
codeGen.append_base_ctor_call(base_class_name, f"unreal.MaterialExpression{pamux_wrapper_class_name}, node_pos")
properties.to_py("MaterialExpressionEditorPropertyImpl", codeGen)
inputs.to_py("InSocketImpl", codeGen)
outputs.to_py("OutSocketImpl", codeGen)
# codeGen.append_line(f"self.properties = {pamux_wrapper_class_name}.Properties()")
# codeGen.append_line(f"self.inputs = {pamux_wrapper_class_name}.Inputs()")
# codeGen.append_line(f"self.outputs = {pamux_wrapper_class_name}.Outputs()")
ctor_params.append_assignment_lines(codeGen)
codeGen.end_ctor()
codeGen.end_class()
def generate_pamux_wrapper_classes():
read_dump_data()
codeGen.append_import("unreal")
codeGen.append_import_from("pamux_unreal_tools.impl.material_expression_impl", "MaterialExpressionImpl")
codeGen.append_import_from("pamux_unreal_tools.impl.material_expression_editor_property_impl", "MaterialExpressionEditorPropertyImpl")
codeGen.append_import_from("pamux_unreal_tools.impl.in_socket_impl", "InSocketImpl")
codeGen.append_import_from("pamux_unreal_tools.impl.out_socket_impl", "OutSocketImpl")
codeGen.append_import_from("pamux_unreal_tools.utils.node_pos", "NodePos")
codeGen.append_include("Materials/MaterialExpressionBreakMaterialAttributes.h")
codeGen.append_include("MaterialExpressionImpl.h")
codeGen.append_include("MaterialExpressionEditorPropertyImpl.h")
codeGen.append_include("InSocketImpl.h")
codeGen.append_include("OutSocketImpl.h")
codeGen.append_include("NodePos.h")
for class_name in dir(unreal):
c = getattr(unreal, class_name)
if not inspect.isclass(c):
continue
if not issubclass(c, unreal.MaterialExpression):
continue
if c == unreal.MaterialExpression:
continue
if c.__name__ in skip_these_classes:
continue
generate_pamux_wrapper_class(codeGen, c)
codeGen.write(codeGen.declaration_filepath)
generate_pamux_wrapper_classes()
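# Hedged dry-run sketch (definition only, nothing is written): lists the
# unreal.MaterialExpression subclasses the generator above would visit, using the same
# filters. skip_these_classes is assumed to come from the star-imported generator modules.
def list_material_expression_classes():
    names = []
    for class_name in dir(unreal):
        c = getattr(unreal, class_name)
        if not inspect.isclass(c) or not issubclass(c, unreal.MaterialExpression):
            continue
        if c == unreal.MaterialExpression or c.__name__ in skip_these_classes:
            continue
        names.append(c.__name__)
    return names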
|
#!-!-!-!-!-!-!-!-!-!-!-!-!-!-#
#####
######## IMPORTS
#####
import unreal
print('starting ai_updates.py')
######## THING
#####
#!-!-!-!-!-!-!-!-!-!-!-!-!-!-#
#####
######## VARIABLES
#####
######## THING
#####
#!-!-!-!-!-!-!-!-!-!-!-!-!-!-#
#####
######## FUNCTIONS
#####
######## THING
#####
# ######## RETRIEVE UPDATE DATA
# #####
# def retrieve_update_data(packname='visai'):
# if packname=="visai":
# dataURL = 'https://ai.vis-foundation.com/project/.html' # Get Data
# elif packname=="visgm":
# dataURL = 'https://gm.vis-foundation.com/project/.html' # Get Data
# else:
# return "pack not recognized. use all lowercase, no spaces"
# # Cleanup
# thePage = str(urllib2.urlopen(dataURL).read()) # Format to a string
# parsedReq = thePage.split("#") # Split String Data
# del parsedReq[0] # Remove Null Data
# del parsedReq[-1] # Remove Null Data
# return parsedReq
# ######## CLEAN UPDATE DATA
# #####
# def clean_update_data(listToClean):
# versionUpdateDataList = []
# for i in listToClean:
# j = i.replace(' ','')
# k = j.replace('\\n','')
# versionUpdateDataList.append(k)
# return versionUpdateDataList
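# Hedged Python 3 sketch of the commented-out retrieval above: urllib2 is a Python 2
# module, so a current Unreal Python (3.x) environment would use urllib.request instead.
# The data URL is a caller-supplied placeholder; nothing here is called automatically.
import urllib.request

def retrieve_update_data_py3(data_url):
    # Fetch the page, decode it, split on '#' markers and drop empty chunks
    with urllib.request.urlopen(data_url) as response:
        page = response.read().decode("utf-8")
    return [chunk.strip() for chunk in page.split("#") if chunk.strip()]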
#!-!-!-!-!-!-!-!-!-!-!-!-!-!-#
#####
######## THE APP
#####
######## THING
#####
print('ai_updates.py has been initialized')
|
import unreal
import datetime
import random
import json
unreal.log("It's {}".format(datetime.datetime.now()))
unreal.log("A dice roll: {}".format(random.randrange(1, 6)))
user_config = {
"name": "Bob",
"screen_size": (1920, 1080),
"is_active": True,
}
unreal.log("The project is located at {}".format(unreal.Paths.project_dir()))
with open(unreal.Paths.project_dir()+'user_config.json', 'w') as f:
json.dump(user_config, f)
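# Hedged follow-up sketch: read the config written above back from the project directory.
# Note that the screen_size tuple comes back as a JSON list after the round trip.
with open(unreal.Paths.project_dir()+'user_config.json', 'r') as f:
    loaded_config = json.load(f)
unreal.log("Loaded user config: {}".format(loaded_config))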
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" An example script that uses the API to instantiate two HDAs. The first HDA
will be used as an input to the second HDA. For the second HDA we set 2 inputs:
an asset input (the first instantiated HDA) and a curve input (a helix). The
inputs are set during post instantiation (before the first cook). After the
first cook and output creation (post processing) the input structure is fetched
and logged.
"""
import math
import unreal
_g_wrapper1 = None
_g_wrapper2 = None
def get_copy_curve_hda_path():
return '/project/.copy_to_curve_1_0'
def get_copy_curve_hda():
return unreal.load_object(None, get_copy_curve_hda_path())
def get_pig_head_hda_path():
return '/project/.pig_head_subdivider_v01'
def get_pig_head_hda():
return unreal.load_object(None, get_pig_head_hda_path())
def configure_inputs(in_wrapper):
print('configure_inputs')
# Unbind from the delegate
in_wrapper.on_post_instantiation_delegate.remove_callable(configure_inputs)
# Create a geo input
asset_input = in_wrapper.create_empty_input(unreal.HoudiniPublicAPIAssetInput)
# Set the input objects/assets for this input
# asset_input.set_input_objects((_g_wrapper1.get_houdini_asset_actor().houdini_asset_component, ))
asset_input.set_input_objects((_g_wrapper1, ))
# copy the input data to the HDA as node input 0
in_wrapper.set_input_at_index(0, asset_input)
# We can now discard the API input object
asset_input = None
# Create a curve input
curve_input = in_wrapper.create_empty_input(unreal.HoudiniPublicAPICurveInput)
# Create a curve wrapper/helper
curve_object = unreal.HoudiniPublicAPICurveInputObject(curve_input)
# Make it a NURBS curve
curve_object.set_curve_type(unreal.HoudiniPublicAPICurveType.NURBS)
# Set the points of the curve; for this example we create a helix
# sampled at 10 points
curve_points = []
for i in range(10):
t = i / 10.0 * math.pi * 2.0
x = 100.0 * math.cos(t)
y = 100.0 * math.sin(t)
z = i * 10.0
curve_points.append(unreal.Transform([x, y, z], [0, 0, 0], [1, 1, 1]))
curve_object.set_curve_points(curve_points)
# Set the curve wrapper as an input object
curve_input.set_input_objects((curve_object, ))
# Copy the input data to the HDA as node input 1
in_wrapper.set_input_at_index(1, curve_input)
# We can now discard the API input object
curve_input = None
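# Hedged helper sketch (not called by this example): the helix points above can be
# factored into a small builder so the point count, radius and height step are explicit
# parameters rather than literals.
def make_helix_points(num_points=10, radius=100.0, height_step=10.0):
    points = []
    for i in range(num_points):
        t = i / float(num_points) * math.pi * 2.0
        points.append(unreal.Transform(
            [radius * math.cos(t), radius * math.sin(t), i * height_step],
            [0, 0, 0],
            [1, 1, 1]))
    return points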
def print_api_input(in_input):
print('\t\tInput type: {0}'.format(in_input.__class__))
print('\t\tbKeepWorldTransform: {0}'.format(in_input.keep_world_transform))
print('\t\tbImportAsReference: {0}'.format(in_input.import_as_reference))
if isinstance(in_input, unreal.HoudiniPublicAPICurveInput):
print('\t\tbCookOnCurveChanged: {0}'.format(in_input.cook_on_curve_changed))
print('\t\tbAddRotAndScaleAttributesOnCurves: {0}'.format(in_input.add_rot_and_scale_attributes_on_curves))
input_objects = in_input.get_input_objects()
if not input_objects:
print('\t\tEmpty input!')
else:
print('\t\tNumber of objects in input: {0}'.format(len(input_objects)))
for idx, input_object in enumerate(input_objects):
print('\t\t\tInput object #{0}: {1}'.format(idx, input_object))
if isinstance(input_object, unreal.HoudiniPublicAPICurveInputObject):
print('\t\t\tbClosed: {0}'.format(input_object.is_closed()))
print('\t\t\tCurveMethod: {0}'.format(input_object.get_curve_method()))
print('\t\t\tCurveType: {0}'.format(input_object.get_curve_type()))
print('\t\t\tReversed: {0}'.format(input_object.is_reversed()))
print('\t\t\tCurvePoints: {0}'.format(input_object.get_curve_points()))
def print_inputs(in_wrapper):
print('print_inputs')
# Unbind from the delegate
in_wrapper.on_post_processing_delegate.remove_callable(print_inputs)
# Fetch inputs, iterate over it and log
node_inputs = in_wrapper.get_inputs_at_indices()
parm_inputs = in_wrapper.get_input_parameters()
if not node_inputs:
print('No node inputs found!')
else:
print('Number of node inputs: {0}'.format(len(node_inputs)))
for input_index, input_wrapper in node_inputs.items():
print('\tInput index: {0}'.format(input_index))
print_api_input(input_wrapper)
if not parm_inputs:
print('No parameter inputs found!')
else:
print('Number of parameter inputs: {0}'.format(len(parm_inputs)))
for parm_name, input_wrapper in parm_inputs.items():
print('\tInput parameter name: {0}'.format(parm_name))
print_api_input(input_wrapper)
def run():
# get the API singleton
api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
global _g_wrapper1, _g_wrapper2
# instantiate the input HDA with auto-cook enabled
_g_wrapper1 = api.instantiate_asset(get_pig_head_hda(), unreal.Transform())
# instantiate the copy curve HDA
_g_wrapper2 = api.instantiate_asset(get_copy_curve_hda(), unreal.Transform())
# Configure inputs on_post_instantiation, after instantiation, but before first cook
_g_wrapper2.on_post_instantiation_delegate.add_callable(configure_inputs)
# Print the input state after the cook and output creation.
_g_wrapper2.on_post_processing_delegate.add_callable(print_inputs)
if __name__ == '__main__':
run()
|
import unreal
start, rotation = unreal.EditorLevelLibrary.get_level_viewport_camera_info()
end = start + rotation.get_forward_vector() * 10000.0
actors = unreal.EditorLevelLibrary.get_all_level_actors()
# print actors[0]
# actors_except_foliage = unreal.EditorLevelLibrary.get_selected_level_actors()
# print actors_except_foliage[0]
result = unreal.SystemLibrary.box_trace_single(unreal.EditorLevelLibrary.get_editor_world(),
start, end, unreal.Vector(300.0, 300.0, 300.0), rotation,
unreal.TraceTypeQuery.TRACE_TYPE_QUERY1,
True, [], unreal.DrawDebugTrace.NONE, True,
unreal.LinearColor(1.0, 0.0, 0.0, 1.0),
unreal.LinearColor(0.0, 1.0, 0.0, 1.0))
if result:
hit_component = result.to_tuple()[10]
print(hit_component)
if isinstance(hit_component, unreal.FoliageInstancedStaticMeshComponent):
asset_path = hit_component.static_mesh.get_path_name()
unreal.EditorAssetLibrary.sync_browser_to_objects([asset_path])
|
# Copyright (c) 2023 Max Planck Society
# License: https://bedlam.is.tuebingen.mpg.de/license.html
#
# Render jobs in MovieRenderQueue and generate camera ground truth (extrinsics/intrinsics) information
#
# Requirements:
# Python Editor Script Plugin
# Unreal 5.0.3+
#
from pathlib import Path
import re
import sys
import unreal
# Globals
output_dir = r"/project/"
pipeline_executor = None
"""
Summary:
This function is called after the executor has finished
Params:
success - True if all jobs completed successfully.
"""
def OnQueueFinishedCallback(executor, success):
unreal.log("Render queue completed. Success: " + str(success))
# Delete our reference too so we don't keep it alive.
global pipeline_executor
del pipeline_executor
"""
Summary:
This function is called after each individual job in the queue is finished.
At this point, PIE has been stopped so edits you make will be applied to the
editor world.
"""
def OnIndividualJobFinishedCallback(job, success):
unreal.log("Individual job completed: success=" + str(success))
# Export camera ground truth to .csv
sequence_name = job.job_name
export_camera_data(sequence_name)
def export_camera_data(sequence_name):
camera_csv_dir = Path(output_dir) / "ground_truth" / "camera"
camera_csv_dir.mkdir(parents=True, exist_ok=True)
camera_csv_path = camera_csv_dir / f"{sequence_name}_camera.csv"
unreal.log(f"BEDLAM: Exporting camera data: {camera_csv_path}")
# Open project logfile to read camera parameters
output = []
logfile_dir = unreal.Paths.project_log_dir()
project_path = unreal.Paths.get_project_file_path()
(path, logfile_name, ext) = unreal.Paths.split(project_path)
logfile_path = Path(logfile_dir) / f"{logfile_name}.log"
with open(logfile_path, "r") as fp:
lines = fp.readlines()
lines.reverse()
output = []
for line in lines:
line = line.rstrip()
if "BEDLAM_CAMERA_START" in line:
break
match = re.search(r"BEDLAM_CAMERA:(.*)", line)
if match:
output.append(match.group(1))
output.reverse()
with open(camera_csv_path, "w") as fp:
fp.write("name,x,y,z,yaw,pitch,roll,focal_length,sensor_width,sensor_height,hfov\n")
for (index, line) in enumerate(output):
match = re.search(r"(\d+),(.+)", line)
if not match:
unreal.log_error("Invalid camera data: " + line)
return False
frame = int(match.group(1))
name = f"{sequence_name}_{frame:04d}.png"
line = match.group(2)
fp.write(name + "," + line + "\n")
return True
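# Hedged helper sketch (not used above): parses a single "<frame>,<csv data>" payload
# captured from a BEDLAM_CAMERA log line into its frame index and CSV remainder,
# mirroring the regex used in export_camera_data.
def parse_camera_line(payload):
    match = re.search(r"(\d+),(.+)", payload)
    if not match:
        return None
    return int(match.group(1)), match.group(2)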
###############################################################################
# Main
###############################################################################
if __name__ == "__main__":
unreal.log("BEDLAM: Render jobs in MovieRenderQueue and generate camera ground truth (extrinsics/intrinsics) information")
if len(sys.argv) == 2:
output_dir = sys.argv[1]
# Process queue
movie_pipeline_queue_subsystem = unreal.get_editor_subsystem(unreal.MoviePipelineQueueSubsystem)
pipeline_queue = movie_pipeline_queue_subsystem.get_queue()
for job in pipeline_queue.get_jobs():
print(f"{job}")
# This renders the queue that the subsystem belongs with the PIE executor, mimicking Render (Local)
pipeline_executor = movie_pipeline_queue_subsystem.render_queue_with_executor(unreal.MoviePipelinePIEExecutor)
pipeline_executor.on_executor_finished_delegate.add_callable_unique(OnQueueFinishedCallback)
pipeline_executor.on_individual_job_finished_delegate.add_callable_unique(OnIndividualJobFinishedCallback) # Only available on PIE Executor
|
# coding: utf-8
import unreal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-vrm")
parser.add_argument("-rig")
parser.add_argument("-meta")
args = parser.parse_args()
print(args.vrm)
reg = unreal.AssetRegistryHelpers.get_asset_registry()
##
rigs = unreal.ControlRigBlueprint.get_currently_open_rig_blueprints()
print(rigs)
for r in rigs:
s:str = r.get_path_name()
ss:str = args.rig
if (s.find(ss) < 0):
print("no rig")
else:
rig = r
#rig = rigs[10]
hierarchy = unreal.ControlRigBlueprintLibrary.get_hierarchy(rig)
h_con = hierarchy.get_controller()
r_con = rig.get_controller()
graph = r_con.get_graph()
node = graph.get_nodes()
for e in hierarchy.get_controls(True):
h_con.remove_element(e)
for e in hierarchy.get_nulls(True):
h_con.remove_element(e)
boneCount = 0
for e in hierarchy.get_bones():
p = hierarchy.get_first_parent(e)
print('===')
print(e)
print(p)
t = unreal.Transform()
s = unreal.RigControlSettings()
s.shape_visible = False
v = unreal.RigControlValue()
shape_t = unreal.Transform(location=[0.0, 0.0, 0.0], rotation=[0.0, 0.0, 0.0], scale=[0.001, 0.001, 0.001])
if (boneCount == 0):
n = h_con.add_null("{}_s".format(e.name), unreal.RigElementKey(), t)
c = h_con.add_control(e.name, n, s, v)
t = hierarchy.get_global_transform(n)
hierarchy.set_global_transform(c, t, True)
hierarchy.set_control_shape_transform(c, shape_t, True)
else:
p.type = unreal.RigElementType.CONTROL
n = h_con.add_null("{}_s".format(e.name), p, t)
c = h_con.add_control(e.name, n, s, v)
t = hierarchy.get_global_transform(n)
hierarchy.set_global_transform(c, t, True)
hierarchy.set_control_shape_transform(c, shape_t, True)
if ("{}".format(e.name) == "head"):
parent = c
n = h_con.add_null("eye_l_s", parent, t)
c = h_con.add_control("eye_l", n, s, v)
t = hierarchy.get_global_transform(n)
hierarchy.set_global_transform(c, t, True)
hierarchy.set_control_shape_transform(c, shape_t, True)
n = h_con.add_null("eye_r_s", parent, t)
c = h_con.add_control("eye_r", n, s, v)
t = hierarchy.get_global_transform(n)
hierarchy.set_global_transform(c, t, True)
hierarchy.set_control_shape_transform(c, shape_t, True)
boneCount += 1
|
import unreal
from typing import Dict, Any
from utils import logging as log
import json # Import json for parsing C++ response
# Lazy load the C++ utils class to avoid issues during Unreal init
_widget_gen_utils = None
def get_widget_gen_utils():
global _widget_gen_utils
if _widget_gen_utils is None:
try:
# Attempt to load the class generated from C++
# Make sure your C++ class is compiled and accessible to Python
_widget_gen_utils = unreal.GenWidgetUtils()
except Exception as e:
log.log_error(f"Failed to get unreal.WidgetGenUtils: {e}. Ensure C++ module is compiled and loaded.")
raise # Re-raise to signal failure
return _widget_gen_utils
def handle_add_widget_to_user_widget(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handles adding a widget component to a User Widget Blueprint.
"""
try:
widget_gen_utils = get_widget_gen_utils()
user_widget_path = command.get("user_widget_path")
widget_type = command.get("widget_type")
widget_name = command.get("widget_name")
parent_name = command.get("parent_widget_name", "") # Default to empty string if not provided
log.log_command("add_widget_to_user_widget", f"Path: {user_widget_path}, Type: {widget_type}, Name: {widget_name}, Parent: {parent_name}")
if not all([user_widget_path, widget_type, widget_name]):
return {"success": False, "error": "Missing required arguments: user_widget_path, widget_type, widget_name"}
# Call the C++ function
response_str = widget_gen_utils.add_widget_to_user_widget(user_widget_path, widget_type, widget_name, parent_name)
# Parse the JSON response from C++
response_json = json.loads(response_str)
log.log_result("add_widget_to_user_widget", response_json.get("success", False), response_json.get("message") or response_json.get("error"))
return response_json
except Exception as e:
log.log_error(f"Error in handle_add_widget_to_user_widget: {str(e)}", include_traceback=True)
return {"success": False, "error": f"Python Handler Error: {str(e)}"}
def handle_edit_widget_property(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handles editing a property of a widget inside a User Widget Blueprint.
"""
try:
widget_gen_utils = get_widget_gen_utils()
user_widget_path = command.get("user_widget_path")
widget_name = command.get("widget_name")
property_name = command.get("property_name")
value_str = command.get("value") # Value is expected as a string
log.log_command("edit_widget_property", f"Path: {user_widget_path}, Widget: {widget_name}, Property: {property_name}, Value: {value_str}")
if not all([user_widget_path, widget_name, property_name, value_str is not None]):
return {"success": False, "error": "Missing required arguments: user_widget_path, widget_name, property_name, value"}
# Call the C++ function
response_str = widget_gen_utils.edit_widget_property(user_widget_path, widget_name, property_name, value_str)
# Parse the JSON response from C++
response_json = json.loads(response_str)
log.log_result("edit_widget_property", response_json.get("success", False), response_json.get("message") or response_json.get("error"))
return response_json
except Exception as e:
log.log_error(f"Error in handle_edit_widget_property: {str(e)}", include_traceback=True)
return {"success": False, "error": f"Python Handler Error: {str(e)}"}
|
import unreal
print("=== SETTING UP COMPLETE TEST LEVEL (FIXED) ===")
try:
# Load TestLevel
print("Loading TestLevel...")
level_loaded = unreal.EditorLevelLibrary.load_level('/project/')
if not level_loaded:
print("Creating new TestLevel...")
unreal.EditorLevelLibrary.new_level('/project/')
print("✓ TestLevel ready")
# Clear existing content
print("Clearing existing level content...")
try:
actors = unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_all_level_actors()
for actor in actors:
if not actor.get_name().startswith('Default') and not actor.get_name().startswith('WorldSettings'):
unreal.get_editor_subsystem(unreal.EditorActorSubsystem).destroy_actor(actor)
print("✓ Level cleared")
except:
print("Note: Level clear issue - continuing")
# 1. ADD LIGHTING
print("Adding lighting...")
# Add Directional Light (Sun)
sun_light = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.DirectionalLight,
unreal.Vector(0, 0, 500),
unreal.Rotator(-45, 45, 0)
)
if sun_light:
light_comp = sun_light.get_component_by_class(unreal.DirectionalLightComponent)
if light_comp:
light_comp.set_editor_property('intensity', 3.0)
# Use Color instead of LinearColor
light_comp.set_editor_property('light_color', unreal.Color(255, 242, 204, 255)) # Warm sunlight
print("✓ Sun light added")
# Add Sky Light
sky_light = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.SkyLight,
unreal.Vector(0, 0, 600),
unreal.Rotator(0, 0, 0)
)
if sky_light:
sky_comp = sky_light.get_component_by_class(unreal.SkyLightComponent)
if sky_comp:
sky_comp.set_editor_property('intensity', 1.0)
sky_comp.set_editor_property('light_color', unreal.Color(102, 153, 255, 255)) # Blue sky
print("✓ Sky light added")
# 2. ADD LARGE FLOOR
print("Adding main floor...")
main_floor = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.StaticMeshActor,
unreal.Vector(0, 0, 0),
unreal.Rotator(0, 0, 0)
)
if main_floor:
mesh_comp = main_floor.get_component_by_class(unreal.StaticMeshComponent)
if mesh_comp:
cube_mesh = unreal.EditorAssetLibrary.load_asset('/project/')
if cube_mesh:
mesh_comp.set_static_mesh(cube_mesh)
# Scale: X=50 (length), Y=50 (width), Z=0.1 (very thin floor)
main_floor.set_actor_scale3d(unreal.Vector(50, 50, 0.1))
main_floor.set_actor_label("MainFloor")
print("✓ Main floor (5000x5000 units) added")
# 3. ADD PLATFORMS
print("Adding platforms...")
platforms = [
(unreal.Vector(1000, 0, 150), unreal.Vector(8, 8, 1), "Platform1"),
(unreal.Vector(2000, 500, 300), unreal.Vector(6, 6, 1), "Platform2"),
(unreal.Vector(-1000, 0, 150), unreal.Vector(8, 8, 1), "Platform3"),
(unreal.Vector(-2000, -500, 300), unreal.Vector(6, 6, 1), "Platform4"),
(unreal.Vector(0, 1500, 200), unreal.Vector(10, 4, 1), "Bridge1"),
(unreal.Vector(0, -1500, 200), unreal.Vector(10, 4, 1), "Bridge2"),
]
for pos, scale, name in platforms:
platform = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.StaticMeshActor,
pos,
unreal.Rotator(0, 0, 0)
)
if platform:
mesh_comp = platform.get_component_by_class(unreal.StaticMeshComponent)
if mesh_comp:
cube_mesh = unreal.EditorAssetLibrary.load_asset('/project/')
if cube_mesh:
mesh_comp.set_static_mesh(cube_mesh)
platform.set_actor_scale3d(scale)
platform.set_actor_label(name)
print("✓ 6 platforms added")
# 4. ADD BOUNDARY WALLS
print("Adding walls...")
walls = [
(unreal.Vector(0, 2500, 200), unreal.Vector(50, 1, 4), "WallFront"),
(unreal.Vector(0, -2500, 200), unreal.Vector(50, 1, 4), "WallBack"),
(unreal.Vector(2500, 0, 200), unreal.Vector(1, 50, 4), "WallRight"),
(unreal.Vector(-2500, 0, 200), unreal.Vector(1, 50, 4), "WallLeft"),
]
for pos, scale, name in walls:
wall = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.StaticMeshActor,
pos,
unreal.Rotator(0, 0, 0)
)
if wall:
mesh_comp = wall.get_component_by_class(unreal.StaticMeshComponent)
if mesh_comp:
cube_mesh = unreal.EditorAssetLibrary.load_asset('/project/')
if cube_mesh:
mesh_comp.set_static_mesh(cube_mesh)
wall.set_actor_scale3d(scale)
wall.set_actor_label(name)
print("✓ Boundary walls added")
# 5. ADD PLAYER START
print("Adding player start...")
player_start = unreal.EditorLevelLibrary.spawn_actor_from_class(
unreal.PlayerStart,
unreal.Vector(0, 0, 100),
unreal.Rotator(0, 0, 0)
)
if player_start:
player_start.set_actor_label("PlayerSpawn")
print("✓ Player start added")
# 6. TRY TO PLACE CHARACTER
print("Attempting to place character...")
try:
character_bp = unreal.EditorAssetLibrary.load_asset('/project/')
if character_bp:
character_class = character_bp.get_blueprint_generated_class()
if character_class:
character = unreal.EditorLevelLibrary.spawn_actor_from_class(
character_class,
unreal.Vector(200, 0, 100),
unreal.Rotator(0, 0, 0)
)
if character:
character.set_actor_label("WarriorCharacter")
print("✓ Character Blueprint placed")
else:
print("Note: Character Blueprint may need manual configuration")
else:
print("Note: Character will use C++ class via game mode")
else:
print("Note: Character will use C++ class via game mode")
except Exception as e:
print(f"Note: Character will use C++ class: {e}")
# 7. SAVE LEVEL
print("Saving level...")
unreal.EditorLevelLibrary.save_current_level()
print("✓ Level saved")
print("\n" + "="*50)
print("✅ COMPLETE LEVEL SETUP FINISHED!")
print("="*50)
print("\nTestLevel now contains:")
print("🌞 Lighting system (directional + sky)")
print("🏗️ Large main floor (5000x5000 units)")
print("🏃 6 platforms for jumping")
print("🧱 4 boundary walls")
print("📍 Player start point")
print("🎮 Character ready (C++ or Blueprint)")
print("\n🚀 Ready to test!")
print(" 1. Open TestLevel in editor")
print(" 2. Click Play")
print(" 3. Use WASD to move")
print(" 4. Use arrows/mouse to attack")
except Exception as e:
print(f"✗ ERROR: {e}")
import traceback
traceback.print_exc()
# Force exit
print("Exiting...")
unreal.SystemLibrary.execute_console_command(None, "exit")
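# Hedged refactoring sketch (definition only, nothing is spawned here): the repeated
# cube-spawning pattern used for the floor, platforms and walls above could be factored
# into one helper. Uses only the editor APIs already called in this script.
def spawn_scaled_cube(position, scale, label):
    actor = unreal.EditorLevelLibrary.spawn_actor_from_class(
        unreal.StaticMeshActor, position, unreal.Rotator(0, 0, 0))
    if not actor:
        return None
    mesh_comp = actor.get_component_by_class(unreal.StaticMeshComponent)
    cube_mesh = unreal.EditorAssetLibrary.load_asset('/project/')
    if mesh_comp and cube_mesh:
        mesh_comp.set_static_mesh(cube_mesh)
    actor.set_actor_scale3d(scale)
    actor.set_actor_label(label)
    return actor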
|
import unreal
import os
# This script creates a Blueprint for each selected Static Mesh
# The Blueprint's parent class is '/project/'
selectionAssets = unreal.EditorUtilityLibrary().get_selected_assets()
static_mesh_assets = []
asset_paths = [item.get_path_name() for item in selectionAssets]
# Filter for all static meshes
for asset_path in asset_paths:
asset = unreal.EditorAssetLibrary.load_asset(asset_path.split('.')[0])
if asset and isinstance(asset, unreal.StaticMesh):
static_mesh_assets.append(asset)
else:
unreal.log("Asset is not a Static Mesh: %s" % asset_path)
# Load the parent Blueprint class
parent_blueprint_class = unreal.EditorAssetLibrary.load_blueprint_class('/project/')
blueprint_factory = unreal.BlueprintFactory()
# TODO: change the parent class here; it could be set to unreal.Actor instead
blueprint_factory.set_editor_property("parent_class", parent_blueprint_class)
asset_tools: unreal.AssetTools = unreal.AssetToolsHelpers.get_asset_tools()
# Create one Blueprint for each static mesh
for static_mesh in static_mesh_assets:
blueprint_name = 'BP_' + static_mesh.get_name() # BP_SM_Merge_180_110A101CD12
static_mesh_path = static_mesh.get_path_name() # /project/.SM_Merge_180_110A101CD12
blueprint_directory = '/'.join(static_mesh_path.split('/')[:-1]) + '/' # /project/
blueprint_path = blueprint_directory + blueprint_name # /project/
blueprint_generate_class_path = blueprint_path + '_C' # /project/
# If the Blueprint already exists, delete it first
if unreal.EditorAssetLibrary.does_asset_exist(blueprint_path):
unreal.EditorAssetLibrary.delete_asset(blueprint_path)
unreal.log_warning("Asset already exists, deleting and new one will be replaced.")
# 创建蓝图
new_blueprint_asset = asset_tools.create_asset(blueprint_name, blueprint_directory, unreal.Blueprint.static_class(), blueprint_factory)
if new_blueprint_asset:
unreal.log("新蓝图已创建: {}".format(blueprint_path))
else:
unreal.log_error("创建蓝图失败: {}".format(blueprint_path))
|
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import unreal
from Utilities.Utils import Singleton
import random
import re
if sys.platform == "darwin":
import webbrowser
class ChameleonGallery(metaclass=Singleton):
def __init__(self, jsonPath):
self.jsonPath = jsonPath
self.data = unreal.PythonBPLib.get_chameleon_data(self.jsonPath)
self.ui_scrollbox = "ScrollBox"
self.ui_crumbname = "SBreadcrumbTrailA"
self.ui_image = "SImageA"
self.ui_image_local = "SImage_ImageFromRelativePath"
self.ui_imageB = "SImage_ImageFromPath"
self.ui_progressBar = "ProgressBarA"
self.ui_drop_target_text_box = "DropResultBox"
self.ui_python_not_ready = "IsPythonReadyImg"
self.ui_python_is_ready = "IsPythonReadyImgB"
self.ui_is_python_ready_text = "IsPythonReadyText"
self.ui_details_view = "DetailsView"
self.ui_color_block = "ColorBlock"
self.ui_button_expand_color_picker = "ButtonExpandColorPicker"
self.ui_color_picker = "ColorPicker"
self.ui_dpi_scaler = "DPIScaler"
self.imageFlagA = 0
self.imageFlagB = 0
# set data in init
self.set_random_image_data()
self.data.set_combo_box_items('CombBoxA', ['1', '3', '5'])
self.data.set_object(self.ui_details_view, self.data)
self.is_color_picker_shown = self.data.get_visibility(self.ui_color_picker) == "Visible"
self.linearColor_re = re.compile(r"\(R=([-\d.]+),G=([-\d.]+),B=([-\d.]+),A=([-\d.]+)\)")
self.tapython_version = dict(unreal.PythonBPLib.get_ta_python_version())
def mark_python_ready(self):
print("mark_python_ready call")
self.data.set_visibility(self.ui_python_not_ready, "Collapsed")
self.data.set_visibility(self.ui_python_is_ready, "Visible")
self.data.set_text(self.ui_is_python_ready_text, "Python Path Ready.")
def push_breadcrumb(self):
count = self.data.get_breadcrumbs_count_string(self.ui_crumbname)
strs = "is breadcrumb tail from alice in wonder world"
label = strs.split()[count % len(strs.split())]
self.data.push_breadcrumb_string(self.ui_crumbname, label, label)
def set_random_image_data(self):
width = 64
height = 64
colors = [unreal.LinearColor(1, 1, 1, 1) if random.randint(0, 1) else unreal.LinearColor(0, 0, 0, 1) for _ in range(width * height)]
self.data.set_image_pixels(self.ui_image, colors, width, height)
def set_random_progress_bar_value(self):
self.data.set_progress_bar_percent(self.ui_progressBar,random.random())
def change_local_image(self):
self.data.set_image_from(self.ui_image_local, ["Images/ChameleonLogo_c.png", "Images/ChameleonLogo_b.png"][self.imageFlagA])
self.imageFlagA = (self.imageFlagA + 1) % 2
def change_image(self):
self.data.set_image_from_path(self.ui_imageB, ["PythonChameleonIcon_128x.png", "Icon128.png"][self.imageFlagB])
self.imageFlagB = (self.imageFlagB + 1) % 2
def change_comboBox_items(self):
offset = random.randint(1, 10)
items = [str(v+offset) for v in range(random.randint(1, 10))]
self.data.set_combo_box_items("CombBoxA", items)
def launch_other_galleries(self):
if not os.path.exists(os.path.join(os.path.dirname(__file__), 'auto_gen/border_brushes_Gallery.json')):
unreal.PythonBPLib.notification("auto-generated Galleries not exists", info_level=1)
return
gallery_paths = ['ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json'
]
bLaunch = unreal.PythonBPLib.confirm_dialog(f'Open Other {len(gallery_paths)} Galleries? You can close them with the "Close all Gallery" Button' , "Open Other Galleries", with_cancel_button=False)
if bLaunch:
with unreal.ScopedSlowTask(len(gallery_paths), "Spawn Actors") as slow_task:
slow_task.make_dialog(True)
for i, p in enumerate(gallery_paths):
slow_task.enter_progress_frame(1, f"Launch Gallery: {p}")
unreal.ChameleonData.launch_chameleon_tool(p)
def request_close_other_galleries(self):
if not os.path.exists(os.path.join(os.path.dirname(__file__), 'auto_gen/border_brushes_Gallery.json')):
unreal.PythonBPLib.notification("auto-generated Galleries not exists", info_level=1)
return
gallery_paths = ['ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json',
'ChameleonGallery/project/.json'
]
for i, p in enumerate(gallery_paths):
unreal.ChameleonData.request_close(p)
# unreal.ChameleonData.request_close('/project/.json')
exists_tools_var = [globals()[x] for x in globals() if "Utilities.Utils.Singleton" in str(type(type(globals()[x])))]
def on_drop(self, assets, assets_folders, actors):
str_for_show = ""
for items, name in zip([assets, assets_folders, actors], ["Assets:", "Assets Folders:", "Actors:"]):
if items:
str_for_show += f"{name}\n"
for item in items:
str_for_show += f"\t{item}\n"
self.data.set_text(self.ui_drop_target_text_box, str_for_show)
print(f"str_for_show: {str_for_show}")
def on_drop_func(self, *args, **kwargs):
print(f"args: {args}")
print(f"kwargs: {kwargs}")
str_for_show = ""
for name, items in kwargs.items():
if items:
str_for_show += f"{name}:\n"
for item in items:
str_for_show += f"\t{item}\n"
self.data.set_text(self.ui_drop_target_text_box, str_for_show)
def get_full_size_of_this_chameleon(self):
current_size = unreal.ChameleonData.get_chameleon_window_size(self.jsonPath)
scrollbox_offsets = self.data.get_scroll_box_offsets(self.ui_scrollbox)
height_full = scrollbox_offsets["ScrollOffsetOfEnd"] / (1.0-scrollbox_offsets["viewFraction"])
height_full += 48
print(f"delta: {height_full} - {round(height_full)}")
return current_size.x, round(height_full)
def on_button_ChangeTabSize_click(self, offset_pixel):
current_size = unreal.ChameleonData.get_chameleon_window_size(self.jsonPath)
print(f"currentSize: {current_size}")
offsets = self.data.get_scroll_box_offsets(self.ui_scrollbox)
print(offsets)
if current_size:
current_size.x += offset_pixel
unreal.ChameleonData.set_chameleon_window_size("ChameleonGallery/ChameleonGallery.json", current_size)
def on_button_FlashWindow_click(self):
unreal.ChameleonData.flash_chameleon_window("ChameleonGallery/ChameleonGallery.json")
def on_button_Snapshot_click(self):
full_size = self.get_full_size_of_this_chameleon()
print(f"try save snapshot @ {full_size}")
saved_file_path = unreal.ChameleonData.snapshot_chameleon_window(self.jsonPath, unreal.Vector2D(*full_size))
if saved_file_path:
unreal.PythonBPLib.notification(f"UI Snapshot Saved:", hyperlink_text = saved_file_path
, on_hyperlink_click_command = f'chameleon_gallery.explorer("{saved_file_path}")')
else:
unreal.PythonBPLib.notification(f"Save UI snapshot failed.", info_level = 1)
def explorer(self, file_path):
if sys.platform == "darwin":
webbrowser.open(os.path.dirname(file_path))
else:
file_path = file_path.replace("/", "\\")
subprocess.call('explorer "{}" '.format(os.path.dirname(file_path)))
def set_selected_actor_to_details_view(self):
selected = unreal.get_editor_subsystem(unreal.EditorActorSubsystem).get_selected_level_actors()
if selected:
self.data.set_object(self.ui_details_view, selected[0])
else:
print("Selected None")
def on_expand_color_picker_click(self):
self.data.set_visibility(self.ui_color_picker, "Collapsed" if self.is_color_picker_shown else "Visible")
self.data.set_text(self.ui_button_expand_color_picker, "Expand ColorPicker" if self.is_color_picker_shown else "Collapse ColorPicker")
self.is_color_picker_shown = not self.is_color_picker_shown
current_size = unreal.ChameleonData.get_chameleon_window_size(self.jsonPath)
if current_size.x < 650:
current_size.x = 650
unreal.ChameleonData.set_chameleon_window_size("ChameleonGallery/ChameleonGallery.json", current_size)
def on_color_picker_commit(self, color_str):
v = [float(a) for a in self.linearColor_re.match(color_str).groups()]
self.data.set_color(self.ui_color_block, unreal.LinearColor(*v))
def change_dpi_scaler_value(self, value):
if self.tapython_version["Minor"] < 2 or(
self.tapython_version["Minor"] == 2 and self.tapython_version["Patch"] < 1
):
print("Need TAPython version >= 1.2.1")
return
self.data.set_dpi_scale(self.ui_dpi_scaler, value + 0.5)
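# Hedged usage sketch: TAPython normally instantiates ChameleonGallery from the tool's
# JSON definition; launching the gallery by hand would look roughly like this. The JSON
# path matches the one hard-coded in the methods above.
def launch_chameleon_gallery(json_path="ChameleonGallery/ChameleonGallery.json"):
    unreal.ChameleonData.launch_chameleon_tool(json_path)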
|
import unreal
def log_hello_unreal():
'''
logs hello unreal to the output log in unreal.
'''
unreal.log_warning("hello unreal")
log_hello_unreal()
# import sequencer_examples
# unreal.log_warning("sequencer_examples is loaded!")
# import sequencer_fbx_examples
# unreal.log_warning("sequencer_fbx_examples is loaded!")
# import sequencer_key_examples
# unreal.log_warning("sequencer_key_examples is loaded!")
|
import unreal
import sys
from os.path import dirname, basename, isfile, join
import glob
import importlib
import traceback
dir_name = dirname(__file__)
dir_basename = basename(dir_name)
modules = glob.glob(join(dir_name, "*.py"))
__all__ = [ basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
unreal.log("""@
####################
Load UI Library
####################
""")
for m in __all__:
# __import__(m, locals(), globals())
try:
mod = importlib.import_module("{}.{}".format(dir_basename, m))
if m in globals():
unreal.log("""@
####################
ReLoad UI Library
####################
""")
try:
reload(mod)
except:
importlib.reload(mod)
unreal.log("Successfully import {}".format(m))
except Exception as why:
unreal.log_error("Fail to import {}.\n {}".format(m, why))
traceback.print_exc(file=sys.stdout)
unreal.log("""@
####################
""")
|
# coding: utf-8
import unreal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-vrm")
parser.add_argument("-rig")
parser.add_argument("-meta")
args = parser.parse_args()
#print(args.vrm)
humanoidBoneList = [
"hips",
"leftUpperLeg",
"rightUpperLeg",
"leftLowerLeg",
"rightLowerLeg",
"leftFoot",
"rightFoot",
"spine",
"chest",
"upperChest",
"neck",
"head",
"leftShoulder",
"rightShoulder",
"leftUpperArm",
"rightUpperArm",
"leftLowerArm",
"rightLowerArm",
"leftHand",
"rightHand",
"leftToes",
"rightToes",
"leftEye",
"rightEye",
"jaw",
"leftThumbProximal",
"leftThumbIntermediate",
"leftThumbDistal",
"leftIndexProximal",
"leftIndexIntermediate",
"leftIndexDistal",
"leftMiddleProximal",
"leftMiddleIntermediate",
"leftMiddleDistal",
"leftRingProximal",
"leftRingIntermediate",
"leftRingDistal",
"leftLittleProximal",
"leftLittleIntermediate",
"leftLittleDistal",
"rightThumbProximal",
"rightThumbIntermediate",
"rightThumbDistal",
"rightIndexProximal",
"rightIndexIntermediate",
"rightIndexDistal",
"rightMiddleProximal",
"rightMiddleIntermediate",
"rightMiddleDistal",
"rightRingProximal",
"rightRingIntermediate",
"rightRingDistal",
"rightLittleProximal",
"rightLittleIntermediate",
"rightLittleDistal",
]
for i in range(len(humanoidBoneList)):
humanoidBoneList[i] = humanoidBoneList[i].lower()
######
rigs = unreal.ControlRigBlueprint.get_currently_open_rig_blueprints()
#print(rigs)
for r in rigs:
s:str = r.get_path_name()
ss:str = args.rig
if (s.find(ss) < 0):
print("no rig")
else:
rig = r
print(unreal.SystemLibrary.get_engine_version())
if (unreal.SystemLibrary.get_engine_version()[0] == '5'):
c = rig.get_controller()#rig.controller
else:
c = rig.controller
g = c.get_graph()
n = g.get_nodes()
print(n)
#c.add_branch_node()
#c.add_array_pin()
a:unreal.RigUnit_CollectionItems = unreal.RigUnit_CollectionItems()
# print(a)
# Add the array nodes
collectionItem_forControl:unreal.RigVMStructNode = None
collectionItem_forBone:unreal.RigVMStructNode = None
for node in n:
if (node.get_node_title() == 'Items' or node.get_node_title() == 'Collection from Items'):
#print(node.get_node_title())
#node = unreal.RigUnit_CollectionItems.cast(node)
pin = node.find_pin('Items')
print(pin.get_array_size())
print(pin.get_default_value())
if (pin.get_array_size() < 40):
continue
if 'Type=Bone' in pin.get_default_value():
collectionItem_forBone= node
if 'Type=Control' in pin.get_default_value():
collectionItem_forControl = node
#nn = unreal.EditorFilterLibrary.by_class(n,unreal.RigUnit_CollectionItems.static_class())
## Get the meta object
reg = unreal.AssetRegistryHelpers.get_asset_registry()
a = reg.get_all_assets()
if (args.meta):
for aa in a:
if (aa.get_editor_property("object_path") == args.meta):
v:unreal.VrmMetaObject = aa
vv = aa.get_asset()
if (vv == None):
for aa in a:
if (aa.get_editor_property("object_path") == args.vrm):
v:unreal.VrmAssetListObject = aa
vv = v.get_asset().vrm_meta_object
#print(vv)
meta = vv
# controller array
if (collectionItem_forControl == None):
collectionItem_forControl = c.add_struct_node(unreal.RigUnit_CollectionItems.static_struct(), method_name='Execute')
items_forControl = collectionItem_forControl.find_pin('Items')
c.clear_array_pin(items_forControl.get_pin_path())
# bone array
if (collectionItem_forBone == None):
collectionItem_forBone = c.add_struct_node(unreal.RigUnit_CollectionItems.static_struct(), method_name='Execute')
items_forBone = collectionItem_forBone.find_pin('Items')
c.clear_array_pin(items_forBone.get_pin_path())
## h_mod
rigs = unreal.ControlRigBlueprint.get_currently_open_rig_blueprints()
rig = rigs[0]
print(items_forControl)
print(items_forBone)
humanoidBoneTable = {"dummy" : "dummy"}
humanoidBoneTable.clear()
for h in meta.humanoid_bone_table:
bone_h = "{}".format(h).lower()
bone_m = "{}".format(meta.humanoid_bone_table[h]).lower()
try:
i = list(humanoidBoneTable.values()).index(bone_m)
except:
i = -1
if (bone_h!="" and bone_m!="" and i==-1):
humanoidBoneTable[bone_h] = bone_m
for bone_h in humanoidBoneList:
bone_m = humanoidBoneTable.get(bone_h, None)
if bone_m == None:
continue
#for bone_h in meta.humanoid_bone_table:
# bone_m = meta.humanoid_bone_table[bone_h]
# try:
# i = humanoidBoneList.index(bone_h.lower())
# except:
# i = -1
# if (i >= 0):
if (True):
tmp = '(Type=Bone,Name='
#tmp += "{}".format(bone_m).lower()
tmp += bone_m
tmp += ')'
c.add_array_pin(items_forBone.get_pin_path(), default_value=tmp)
#print(bone_m)
tmp = '(Type=Control,Name='
#tmp += "{}".format(bone_h).lower() + '_c'
tmp += bone_h + '_c'
tmp += ')'
#print(c)
c.add_array_pin(items_forControl.get_pin_path(), default_value=tmp)
#print(bone_h)
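# Each add_array_pin call above appends one item as a default-value string,
# e.g. '(Type=Bone,Name=<model bone name>)' for the bone collection and
# '(Type=Control,Name=hips_c)' for the corresponding control.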
#for e in h_mod.get_elements():
# if (e.type == unreal.RigElementType.CONTROL):
# tmp = '(Type=Control,Name='
# tmp += "{}".format(e.name)
# tmp += ')'
# c.add_array_pin(items_forControl.get_pin_path(), default_value=tmp)
# print(e.name)
# if (e.type == unreal.RigElementType.BONE):
# tmp = '(Type=Bone,Name='
# tmp += "{}".format(e.name)
# tmp += ')'
# c.add_array_pin(items_forBone.get_pin_path(), default_value=tmp)
# print(e.name)
#print(i.get_all_pins_recursively())
#ii:unreal.RigUnit_CollectionItems = n[1]
#pp = ii.get_editor_property('Items')
#print(pp)
#print(collectionItem.get_all_pins_recursively()[0])
#i.get_editor_property("Items")
#c.add_array_pin("Execute")
# Extend the array
#i.get_all_pins_recursively()[0].get_pin_path()
#c.add_array_pin(i.get_all_pins_recursively()[0].get_pin_path(), default_value='(Type=Bone,Name=Global)')
#rig = rigs[10]
|
# -*- coding: utf-8 -*-
import unreal
import os
from Utilities.Utils import Singleton
from Utilities.Utils import cast
import Utilities
import QueryTools
import re
import types
import collections
from . import Utils
global _r
COLUMN_COUNT = 2
class DetailData(object):
def __init__(self):
self.filter_str = ""
self.filteredIndexToIndex = []
self.hisCrumbObjsAndNames = [] #list[(obj, propertyName)]
self.attributes = None
self.filtered_attributes = None
self.plains = []
self.riches = []
self.selected = set()
def check_line_id(self, line_id, column_count):
from_line = line_id * column_count
to_line = (line_id + 1) * column_count
assert len(self.plains) == len(self.riches), "len(self.plains) != len(self.riches)"
if 0 <= from_line < len(self.plains) and 0 <= to_line <= len(self.plains):
return True
else:
unreal.log_error(f"Check Line Id Failed: {line_id}, plains: {len(self.plains)}, rich: {len(self.riches)}")
return False
def get_plain(self, line_id, column_count):
assert self.check_line_id(line_id, column_count), "check line id failed."
return self.plains[line_id * 2 : line_id * 2 + 2]
def get_rich(self, line_id, column_count):
assert self.check_line_id(line_id, column_count), "check line id failed."
return self.riches[line_id * 2: line_id * 2 + 2]
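# Note: plains/riches hold flattened 2-column rows, so with COLUMN_COUNT == 2,
# line_id 3 maps to plains[6:8], i.e. the (name cell, value cell) pair of row 3.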
class ObjectDetailViewer(metaclass=Singleton):
def __init__(self, jsonPath):
self.jsonPath = jsonPath
self.data = unreal.PythonBPLib.get_chameleon_data(self.jsonPath)
self.ui_checkbox_single_mode = "CheckBoxSingleMode"
self.ui_checkbox_compare_mode = "CheckBoxCompareMode"
self.ui_left_group = "LeftDetailGroup"
self.ui_right_group = "RightDetailGroup"
self.ui_button_refresh = "RefreshCompareButton"
self.ui_detailListLeft = "ListViewLeft"
self.ui_detailListRight = "ListViewRight"
self.ui_hisObjsBreadcrumbLeft = 'ObjectHisBreadcrumbLeft'
self.ui_hisObjsBreadcrumbRight = 'ObjectHisBreadcrumbRight'
# self.ui_headRowLeft = "HeaderRowLeft"
# self.ui_headRowRight = "HeaderRowRight"
self.ui_labelLeft = "LabelLeft"
self.ui_labelRight = "LabelRight"
self.ui_info_output = "InfoOutput"
self.ui_rightButtonsGroup = "RightButtonsGroup" # used for compare mode
self.ui_rightListGroup = "RightListGroup"
self.ui_refreshButtonGroup = "RefreshButtonGroup"
self.reset()
def on_close(self):
self.reset()
def on_map_changed(self, map_change_type_str):
# Remove the reference to avoid leaking memory when loading another map.
if map_change_type_str == "TearDownWorld":
self.reset(bResetParameter=False)
else:
pass # skip: LoadMap, SaveMap, NewMap
def reset(self, bResetParameter=True):
if bResetParameter:
self.showBuiltin = True
self.showOther = True
self.showProperties = True
self.showEditorProperties = True
self.showParamFunction = True
self.compareMode = False
self.left = None
self.right = None
self.leftSearchText = ""
self.rightSearchText = ""
self.left_rich = None
self.left_plain = None
self.var = None
self.diff_count = 0
self.clear_ui_info()
def clear_ui_info(self):
for text_ui in [self.ui_info_output, self.ui_labelLeft, self.ui_labelRight]:
self.data.set_text(text_ui, "")
self.data.set_list_view_multi_column_items(self.ui_detailListLeft, [], 2)
self.data.set_list_view_multi_column_items(self.ui_detailListRight, [], 2)
for ui_breadcrumb in [self.ui_hisObjsBreadcrumbRight, self.ui_hisObjsBreadcrumbLeft]:
crumbCount = self.data.get_breadcrumbs_count_string(ui_breadcrumb)
for i in range(crumbCount):
self.data.pop_breadcrumb_string(ui_breadcrumb)
def update_log_text(self, bRight):
bShowRight = self.compareMode
result = ""
for side_str in ["left", "right"] if bShowRight else ["left"]:
bRight = side_str != "left"
ui_breadcrumb = self.ui_hisObjsBreadcrumbRight if bRight else self.ui_hisObjsBreadcrumbLeft
breadcrumbs = self.right.hisCrumbObjsAndNames if bRight else self.left.hisCrumbObjsAndNames
crumbCount = self.data.get_breadcrumbs_count_string(ui_breadcrumb)
if bRight:
result += "\t\t\t"
result += "{} crumb: {} hisObj: {}".format(side_str, crumbCount, len(breadcrumbs))
if self.compareMode:
result = f"{result}\t\t\tdiff count: {self.diff_count}"
self.data.set_text(self.ui_info_output, result)
def get_color_by(self, attr : Utils.attr_detail):
if attr.bCallable_builtin:
return "DarkTurquoise".lower()
if attr.bCallable_other:
return "RoyalBlue".lower()
if attr.bEditorProperty:
return "LimeGreen".lower()
if attr.bOtherProperty:
return "yellow"
def get_color(self, typeStr):
if typeStr == "property":
return 'white'
if typeStr == "return_type":
return 'gray'
if typeStr == "param":
return 'gray'
def get_name_with_rich_text(self, attr:Utils.attr_detail):
name_color = self.get_color_by(attr)
param_color = self.get_color("param")
return_type_color = self.get_color("return_type")
if attr.bProperty:
return "\t<RichText.{}>{}</>".format(name_color, attr.name)
else:
if attr.param_str:
return "\t<RichText.{}>{}(</><RichText.{}>{}</><RichText.{}>)</>".format(name_color, attr.name
, param_color, attr.param_str
, name_color)
else:
if attr.bCallable_other:
return "\t<RichText.{}>{}</>".format(name_color, attr.name)
else:
return "\t<RichText.{}>{}()</><RichText.{}> {}</>".format(name_color, attr.name
, return_type_color, attr.return_type_str)
def get_name_with_plain_text(self, attr:Utils.attr_detail):
if attr.bProperty:
return "\t{}".format(attr.name)
else:
if attr.param_str:
return "\t{}({})".format( attr.name, attr.param_str)
else:
if attr.bCallable_other:
return "\t{}".format( attr.name)
else:
return "\t{}() {}".format(attr.name,attr.return_type_str)
def filter(self, data:DetailData):
result = []
indices = []
for i, attr in enumerate(data.attributes):
if not self.showEditorProperties and attr.bEditorProperty:
continue
if not self.showProperties and attr.bOtherProperty:
continue
if not self.showParamFunction and attr.bHasParamFunction:
continue
if not self.showBuiltin and attr.bCallable_builtin:
continue
if not self.showOther and attr.bCallable_other:
continue
if data.filter_str:
if data.filter_str.lower() not in attr.display_result.lower() and data.filter_str.lower() not in attr.display_name.lower():
continue
result.append(attr)
indices.append(i)
return result, indices
def show_data(self, data:DetailData, ui_listView):
flatten_list_items = []
flatten_list_items_plain = []
for i, attr in enumerate(data.filtered_attributes):
# print(f"{i}: {attr.name} {attr.display_name}, {attr.display_result} ")
attr.check()
assert attr.display_name, f"display name null {attr.display_name}"
assert isinstance(attr.display_result, str), f"display result null {attr.display_result}"
result_str = attr.display_result
if len(result_str) > 200:
result_str = result_str[:200] + "......"
flatten_list_items.extend([self.get_name_with_rich_text(attr), result_str])
flatten_list_items_plain.extend([self.get_name_with_plain_text(attr), result_str])
data.riches = flatten_list_items
data.plains = flatten_list_items_plain
data.selected.clear()
self.data.set_list_view_multi_column_items(ui_listView, flatten_list_items, 2)
def query_and_push(self, obj, propertyName, bPush, bRight):  # bPush: whether to push a breadcrumb; called when navigating into a property
if bRight:
ui_Label = self.ui_labelRight
ui_listView = self.ui_detailListRight
ui_breadcrumb = self.ui_hisObjsBreadcrumbRight
else:
ui_Label = self.ui_labelLeft
ui_listView = self.ui_detailListLeft
ui_breadcrumb = self.ui_hisObjsBreadcrumbLeft
data = self.right if bRight else self.left
data.attributes = Utils.ll(obj)
data.filtered_attributes, data.filteredIndexToIndex = self.filter(data)
self.show_data(data, ui_listView)
# set breadcrumb
if propertyName and len(propertyName) > 0:
label = propertyName
else:
if isinstance(obj, unreal.Object):
label = obj.get_name()
else:
try:
label = obj.__str__()
except TypeError:
label = f"{obj}"
if bPush: # push
# print(f"%%% push: {propertyName}, label {label}")
data.hisCrumbObjsAndNames.append((obj, propertyName))
self.data.push_breadcrumb_string(ui_breadcrumb, label, label)
self.data.set_text(ui_Label, "{} type: {}".format(label, type(obj)) )
crumbCount = self.data.get_breadcrumbs_count_string(ui_breadcrumb)
if bRight:
assert len(self.right.hisCrumbObjsAndNames) == crumbCount, "hisCrumbObjsAndNames count not match {} {}".format(len(self.right.hisCrumbObjsAndNames), crumbCount)
else:
assert len(self.left.hisCrumbObjsAndNames) == crumbCount, "hisCrumbObjsAndNames count not match {} {}".format(len(self.left.hisCrumbObjsAndNames), crumbCount)
self.update_log_text(bRight)
def clear_and_query(self, obj, bRight):
# first time query
self.data.clear_breadcrumbs_string(self.ui_hisObjsBreadcrumbRight if bRight else self.ui_hisObjsBreadcrumbLeft)
if not self.right:
self.right = DetailData()
if not self.left:
self.left = DetailData()
data = self.right if bRight else self.left
data.hisCrumbObjsAndNames = [] #clear his-Object at first time query
if bRight:
assert len(self.right.hisCrumbObjsAndNames) == 0, "len(self.right.hisCrumbObjsAndNames) != 0"
else:
assert len(self.left.hisCrumbObjsAndNames) == 0, "len(self.left.hisCrumbObjsAndNames) != 0"
self.query_and_push(obj, "", bPush=True, bRight= bRight)
self.apply_compare_if_needed()
self.update_log_text(bRight)
def update_ui_by_mode(self):
self.data.set_is_checked(self.ui_checkbox_compare_mode, self.compareMode)
self.data.set_is_checked(self.ui_checkbox_single_mode, not self.compareMode)
bCollapsed = not self.compareMode
self.data.set_collapsed(self.ui_rightButtonsGroup, bCollapsed)
self.data.set_collapsed(self.ui_right_group, bCollapsed)
self.data.set_collapsed(self.ui_button_refresh, bCollapsed)
def on_checkbox_SingleMode_Click(self, state):
self.compareMode = False
self.update_ui_by_mode()
def on_checkbox_CompareMode_Click(self, state):
self.compareMode = True
self.update_ui_by_mode()
def on_button_Refresh_click(self):
self.apply_compare_if_needed()
def on_button_SelectAsset_click(self, bRightSide):
selectedAssets = Utilities.Utils.get_selected_assets()
if len(selectedAssets) == 0:
return
self.clear_and_query(selectedAssets[0], bRightSide)
def on_button_QuerySelected_click(self, bRightSide):
# query component when any component was selected, otherwise actor
obj = Utilities.Utils.get_selected_comp()
if not obj:
obj = Utilities.Utils.get_selected_actor()
if obj:
self.clear_and_query(obj, bRightSide)
def on_drop(self, bRightSide, *args, **kwargs):
if "assets" in kwargs and kwargs["assets"]:
asset = unreal.load_asset(kwargs["assets"][0])
if asset:
self.clear_and_query(asset, bRightSide)
return
if "actors" in kwargs and kwargs["actors"]:
actor = unreal.PythonBPLib.find_actor_by_name(kwargs["actors"][0], unreal.EditorLevelLibrary.get_editor_world())
if actor:
print(actor)
self.clear_and_query(actor, bRightSide)
return
item_count = 0
for k, v in kwargs.items():
item_count += len(v)
if item_count == 0:
selected_comp = Utilities.Utils.get_selected_comp()
if selected_comp:
self.clear_and_query(selected_comp, bRightSide)
def log_r_warning(self):
unreal.log_warning("Assign the global var: '_r' with the MenuItem: 'select X --> _r' on Python Icon menu")
def on_button_Query_R_click(self, r_obj, bRightSide=False):
print("on_button_Query_R_click call")
if not r_obj:
return
self.clear_and_query(r_obj, bRightSide)
def on_list_double_click_do(self, index, bRight):
# print ("on_listview_DetailList_mouse_button_double_click {} bRight: {}".format(index, bRight))
data = self.right if bRight else self.left
typeBlacklist = [int, float, str, bool] #, types.NotImplementedType]
real_index = data.filteredIndexToIndex[index] if data.filteredIndexToIndex else index
assert 0 <= real_index < len(data.attributes)
currentObj, _ = data.hisCrumbObjsAndNames[len(data.hisCrumbObjsAndNames) - 1]
attr_name = data.attributes[real_index].name
objResult, propertyName = self.try_get_object(data, currentObj, attr_name)
if not objResult or objResult is currentObj: # equal
return
if isinstance(objResult, str) and "skip call" in objResult.lower():
return
if type(objResult) in typeBlacklist:
return
if isinstance(objResult, collections.abc.Iterable):
if type(objResult[0]) in typeBlacklist:
return
nextObj = objResult[0]
nextPropertyName = str(propertyName) + "[0]"
else:
nextObj = objResult
nextPropertyName = str(propertyName)
self.query_and_push(nextObj, nextPropertyName, bPush=True, bRight=bRight)
self.apply_compare_if_needed()
self.update_log_text(bRight)
def on_listview_DetailListRight_mouse_button_double_click(self, index):
self.on_list_double_click_do(index, bRight=True)
def on_listview_DetailListLeft_mouse_button_double_click(self, index):
self.on_list_double_click_do(index, bRight=False)
def on_breadcrumbtrail_click_do(self, item, bRight):
ui_hisObjsBreadcrumb = self.ui_hisObjsBreadcrumbRight if bRight else self.ui_hisObjsBreadcrumbLeft
data = self.right if bRight else self.left
count = self.data.get_breadcrumbs_count_string(ui_hisObjsBreadcrumb)
print ("on_breadcrumbtrail_ObjectHis_crumb_click: {} count: {} len(data.hisCrumbObjsAndNames): {}".format(item, count, len(data.hisCrumbObjsAndNames)))
while len(data.hisCrumbObjsAndNames) > count:
data.hisCrumbObjsAndNames.pop()
nextObj, name = data.hisCrumbObjsAndNames[len(data.hisCrumbObjsAndNames) - 1]
if not bRight:
assert self.left.hisCrumbObjsAndNames == data.hisCrumbObjsAndNames, "self.left.hisCrumbObjsAndNames = data.hisCrumbObjsAndNames"
self.query_and_push(nextObj, name, bPush=False, bRight=False)
self.apply_compare_if_needed()
self.update_log_text(bRight=False)
def on_breadcrumbtrail_ObjectHisLeft_crumb_click(self, item):
self.on_breadcrumbtrail_click_do(item, bRight=False)
def on_breadcrumbtrail_ObjectHisRight_crumb_click(self, item):
self.on_breadcrumbtrail_click_do(item, bRight=True)
def remove_address_str(self, strIn):
return re.sub(r'\(0x[0-9A-F]{16}\)', '', strIn)
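# e.g. turns "StaticMeshActor_1 (0x00001234ABCD5678)" into "StaticMeshActor_1 "
# (the object name here is illustrative).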
def apply_compare_if_needed(self):
if not self.compareMode:
return
lefts = self.left.filtered_attributes if self.left.filtered_attributes else self.left.attributes
rights = self.right.filtered_attributes if self.right.filtered_attributes else self.right.attributes
if not lefts:
lefts = []
if not rights:
rights = []
leftIDs = []
rightIDs = []
for i, left_attr in enumerate(lefts):
for j, right_attr in enumerate(rights):
if right_attr.name == left_attr.name:
if right_attr.result != left_attr.result:
if isinstance(right_attr.result, unreal.Transform):
if right_attr.result.is_near_equal(left_attr.result, location_tolerance=1e-20, rotation_tolerance=1e-20, scale3d_tolerance=1e-20):
continue
leftIDs.append(i)
rightIDs.append(j)
break
self.data.set_list_view_multi_column_selections(self.ui_detailListLeft, leftIDs)
self.data.set_list_view_multi_column_selections(self.ui_detailListRight, rightIDs)
self.diff_count = len(leftIDs)
def apply_search_filter(self, text, bRight):
_data = self.right if bRight else self.left
_data.filter_str = text if len(text) else ""
_data.filtered_attributes, _data.filteredIndexToIndex = self.filter(_data)
ui_listView = self.ui_detailListRight if bRight else self.ui_detailListLeft
self.show_data(_data, ui_listView)
self.apply_compare_if_needed()
def on_searchbox_FilterLeft_text_changed(self, text):
self.apply_search_filter(text if text is not None else "", bRight=False)
def on_searchbox_FilterLeft_text_committed(self, text):
self.apply_search_filter(text if text is not None else "", bRight=False)
def on_searchbox_FilterRight_text_changed(self, text):
self.apply_search_filter(text if text is not None else "", bRight=True)
def on_searchbox_FilterRight_text_committed(self, text):
self.apply_search_filter(text if text is not None else "", bRight=True)
def apply_filter(self):
_datas = [self.left, self.right]
_isRight = [False, True]
for data, bRight in zip(_datas, _isRight):
if len(data.hisCrumbObjsAndNames) > 0:
nextObj, name = data.hisCrumbObjsAndNames[len(data.hisCrumbObjsAndNames)-1]
self.query_and_push(nextObj, name, bPush=False, bRight=bRight)
self.apply_compare_if_needed()
self.update_log_text(bRight=False) #
def try_get_object(self, data, obj, name:str):
index = -1
attribute = None
for i, attr in enumerate(data.attributes):
if attr.name == name:
index = i
attribute = attr
assert index >= 0
return attribute.result, name
def ui_on_checkbox_ShowBuiltin_state_changed(self, bEnabled):
self.showBuiltin = bEnabled
self.apply_filter()
def ui_on_checkbox_ShowOther_state_changed(self, bEnabled):
self.showOther = bEnabled
self.apply_filter()
def ui_on_checkbox_ShowProperties_state_changed(self, bEnabled):
self.showProperties = bEnabled
self.apply_filter()
def ui_on_checkbox_ShowEditorProperties_state_changed(self, bEnabled):
self.showEditorProperties = bEnabled
self.apply_filter()
def ui_on_checkbox_ShowParamFunction_state_changed(self, bEnabled):
self.showParamFunction = bEnabled
self.apply_filter()
def ui_on_listview_DetailList_selection_changed(self, bRight):
data = [self.left, self.right][bRight]
list_view = [self.ui_detailListLeft, self.ui_detailListRight][bRight]
selected_indices = set(self.data.get_list_view_multi_column_selection(list_view))
added = selected_indices - data.selected
de_selected = data.selected - selected_indices
for i, lineId in enumerate(added):
self.data.set_list_view_multi_column_line(list_view, lineId, data.get_plain(lineId, column_count=COLUMN_COUNT)
, rebuild_list=True if i == len(added)-1 and len(de_selected) == 0 else False)
for i, lineId in enumerate(de_selected):
self.data.set_list_view_multi_column_line(list_view, lineId, data.get_rich(lineId, column_count=COLUMN_COUNT)
, rebuild_list=True if i == len(de_selected)-1 else False)
data.selected = selected_indices
|
import unreal
_seek_comp_name : str = 'CapsuleComponent'
selected = unreal.EditorUtilityLibrary.get_selected_assets()[0]
name_selected = unreal.EditorAssetLibrary.get_path_name_for_loaded_asset(selected)
name_bp_c = name_selected + '_C'
loaded_bp = unreal.EditorAssetLibrary.load_blueprint_class(name_bp_c)
|
import unreal
import sys
sys.path.append('C:/project/-packages')
from PySide import QtGui, QtUiTools
WINDOW_NAME = 'Qt Window Two'
UI_FILE_FULLNAME = __file__.replace('.py', '.ui')
class QtWindowTwo(QtGui.QWidget):
def __init__(self, parent=None):
super(QtWindowTwo, self).__init__(parent)
self.aboutToClose = None # This is used to stop the tick when the window is closed
self.widget = QtUiTools.QUiLoader().load(UI_FILE_FULLNAME)
self.widget.setParent(self)
self.setWindowTitle(WINDOW_NAME)
self.setGeometry(100, 300, self.widget.width(), self.widget.height())
self.initialiseWidget()
def closeEvent(self, event):
if self.aboutToClose:
self.aboutToClose(self)
event.accept()
def eventTick(self, delta_seconds):
self.myTick(delta_seconds)
##########################################
def initialiseWidget(self):
self.time_while_this_window_is_open = 0.0
self.random_actor = None
self.widget.button_RotateRandom.clicked.connect(self.rotateRandomActorInScene)
def rotateRandomActorInScene(self):
import random
import WorldFunctions
all_actors = WorldFunctions.getAllActors(use_selection = False, actor_class = unreal.StaticMeshActor, actor_tag = None)
rand = random.randrange(0, len(all_actors))
self.random_actor = all_actors[rand]
def myTick(self, delta_seconds):
# Set Time
self.time_while_this_window_is_open += delta_seconds
self.widget.lbl_Seconds.setText("%.1f Seconds" % self.time_while_this_window_is_open)
# Affect Actor
if self.random_actor:
speed = 90.0 * delta_seconds
self.random_actor.add_actor_world_rotation(unreal.Rotator(0.0, 0.0, speed), False, False)
|
import unreal
DO_FOR_ALL = True
if DO_FOR_ALL:
actors = unreal.EditorLevelLibrary.get_all_level_actors()
else:
actors = unreal.EditorLevelLibrary.get_selected_level_actors()
relative_offset = unreal.Vector(-54278.789062, +98541.882812, +63584.894531)
for actor in actors:
current_location = actor.get_actor_location()
actor.set_actor_location(current_location + relative_offset, False, False)
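# Optional idea (suggestion, not in the original script): wrapping the loop in
# "with unreal.ScopedEditorTransaction('Offset actors'):" would make the whole
# move undoable as a single editor transaction.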
|
import unreal
file_a = "/project/.fbx"
file_b = "/project/.fbx"
imported_scenes_path = "/project/"
print('Preparing import options...')
advanced_mesh_options = unreal.DatasmithStaticMeshImportOptions()
advanced_mesh_options.set_editor_property('max_lightmap_resolution', unreal.DatasmithImportLightmapMax.LIGHTMAP_512)
advanced_mesh_options.set_editor_property('min_lightmap_resolution', unreal.DatasmithImportLightmapMin.LIGHTMAP_64)
advanced_mesh_options.set_editor_property('generate_lightmap_u_vs', True)
advanced_mesh_options.set_editor_property('remove_degenerates', True)
base_options = unreal.DatasmithImportBaseOptions()
base_options.set_editor_property('include_geometry', True)
base_options.set_editor_property('include_material', True)
base_options.set_editor_property('include_light', True)
base_options.set_editor_property('include_camera', True)
base_options.set_editor_property('include_animation', True)
base_options.set_editor_property('static_mesh_options', advanced_mesh_options)
base_options.set_editor_property('scene_handling', unreal.DatasmithImportScene.CURRENT_LEVEL)
base_options.set_editor_property('asset_options', []) # Not used
dg_options = unreal.DatasmithDeltaGenImportOptions()
dg_options.set_editor_property('merge_nodes', False)
dg_options.set_editor_property('optimize_duplicated_nodes', False)
dg_options.set_editor_property('remove_invisible_nodes', False)
dg_options.set_editor_property('simplify_node_hierarchy', False)
dg_options.set_editor_property('import_var', True)
dg_options.set_editor_property('var_path', "")
dg_options.set_editor_property('import_pos', True)
dg_options.set_editor_property('pos_path', "")
dg_options.set_editor_property('import_tml', True)
dg_options.set_editor_property('tml_path', "")
dg_options.set_editor_property('textures_dir', "")
dg_options.set_editor_property('intermediate_serialization', unreal.DatasmithDeltaGenIntermediateSerializationType.DISABLED)
dg_options.set_editor_property('colorize_materials', False)
dg_options.set_editor_property('generate_lightmap_u_vs', False)
dg_options.set_editor_property('import_animations', True)
# Direct import to scene and assets:
print('Importing directly to scene...')
unreal.DeltaGenLibrary.import_(file_a, imported_scenes_path, base_options, None, False)
#2-stage import step 1:
print('Parsing to scene object...')
scene = unreal.DatasmithDeltaGenSceneElement.construct_datasmith_scene_from_file(file_b, imported_scenes_path, base_options, dg_options)
print('Resulting datasmith scene: ' + str(scene))
print('\tProduct name: ' + str(scene.get_product_name()))
print('\tMesh actor count: ' + str(len(scene.get_all_mesh_actors())))
print('\tLight actor count: ' + str(len(scene.get_all_light_actors())))
print('\tCamera actor count: ' + str(len(scene.get_all_camera_actors())))
print('\tCustom actor count: ' + str(len(scene.get_all_custom_actors())))
print('\tMaterial count: ' + str(len(scene.get_all_materials())))
print('\tAnimationTimeline count: ' + str(len(scene.get_all_animation_timelines())))
print('\tVariant count: ' + str(len(scene.get_all_variants())))
# Modify one of the Timelines
# Warning: The Animation nested structure is all USTRUCTs, which are value types, and the Array accessor returns
# a copy. Meaning something like timeline[0].name = 'new_name' will set the name on the COPY of anim_nodes[0]
timelines = scene.get_all_animation_timelines()
if len(timelines) > 0:
tim_0 = timelines[0]
old_name = tim_0.name
print('Timeline old name: ' + old_name)
tim_0.name += '_MODIFIED'
modified_name = tim_0.name
print('Anim node modified name: ' + modified_name)
timelines[0] = tim_0
scene.set_all_animation_timelines(timelines)
# Check modification
new_timelines = scene.get_all_animation_timelines()
print('Anim node retrieved modified name: ' + new_timelines[0].name)
assert new_timelines[0].name == modified_name, "Node modification didn't work!"
# Restore to previous state
tim_0 = new_timelines[0]
tim_0.name = old_name
new_timelines[0] = tim_0
scene.set_all_animation_timelines(new_timelines)
# 2-stage import step 2:
print('Importing assets and actors...')
result = scene.import_scene()
print('Import results: ')
print('\tImported actor count: ' + str(len(result.imported_actors)))
print('\tImported mesh count: ' + str(len(result.imported_meshes)))
print('\tImported level sequences: ' + str([a.get_name() for a in result.animations]))
print('\tImported level variant sets asset: ' + str(result.level_variant_sets.get_name()))
if result.import_succeed:
print('Import succeeded!')
else:
print('Import failed!')
|
# Copyright Epic Games, Inc. All Rights Reserved
"""
This script handles processing jobs for a specific queue asset
"""
import unreal
from .render_queue_jobs import render_jobs
from .utils import (
get_asset_data,
movie_pipeline_queue,
update_queue
)
def setup_queue_parser(subparser):
"""
This method adds a custom execution function and args to a queue subparser
:param subparser: Subparser for processing custom sequences
"""
# Set the name of the job
subparser.add_argument(
"queue",
type=str,
help="The name or path to a movie pipeline queue."
)
# Add option to only load the contents of the queue. By default,
# this will only load the queue and render its contents
subparser.add_argument(
"--load",
action="store_true",
help="Load the contents of the queue asset. By default the queue asset will loaded and render its contents.",
)
# We will use the level sequence and the map as our context for
# other subsequence arguments.
subparser.add_argument(
"--jobs",
type=str,
nargs="+",
help="A list of jobs to execute in the queue. "
"If no jobs are provided, all jobs in the queue will be rendered.",
)
# Function to process arguments
subparser.set_defaults(func=_process_args)
def render_queue_asset(
queue_name,
only_load=False,
shots=None,
jobs=None,
all_shots=False,
is_cmdline=False,
is_remote=False,
user=None,
remote_batch_name=None,
remote_job_preset=None,
executor_instance=None,
output_dir_override=None,
output_filename_override=None
):
"""
Render using a Movie Render Queue asset
:param str queue_name: The name of the Queue asset
:param bool only_load: Only load the queue asset. This is usually used when you need to process intermediary steps before rendering
:param list shots: Shots to render from the queue.
:param list jobs: The list of jobs to render from the Queue asset.
:param bool all_shots: Flag to render all shots in a job in the queue.
:param bool is_cmdline: Flag to determine if the job is a commandline job
:param bool is_remote: Flag to determine if the jobs should be rendered remotely
:param str user: Render user
:param str remote_batch_name: Batch name for remote renders
:param str remote_job_preset: Remote render job preset
:param executor_instance: Movie Pipeline executor instance
:param str output_dir_override: Movie Pipeline output directory override
:param str output_filename_override: Movie Pipeline filename format override
:return: MRQ Executor
"""
# The queue subsystem behaves like a singleton so
# clear all the jobs in the current queue.
movie_pipeline_queue.delete_all_jobs()
# Get the queue data asset package path by name or by path
# Create a new queue from the queue asset
movie_pipeline_queue.copy_from(
get_asset_data(queue_name, "MoviePipelineQueue").get_asset()
)
# If we only want to load the queue asset, then exit after loading.
# If we want to shut down the editor as well, then do so
if only_load:
if is_cmdline:
unreal.SystemLibrary.quit_editor()
return None
if not movie_pipeline_queue.get_jobs():
# Make sure we have jobs in the queue to work with
raise RuntimeError("There are no jobs in the queue!!")
# Allow executing the render queue in its current loaded state
if all_shots or (any([shots, jobs])):
update_queue(
jobs=jobs,
shots=shots,
all_shots=all_shots,
user=user
)
try:
# Execute the render. This will execute the render based on whether
# its remote or local
executor = render_jobs(
is_remote,
remote_batch_name=remote_batch_name,
remote_job_preset=remote_job_preset,
is_cmdline=is_cmdline,
executor_instance=executor_instance,
output_dir_override=output_dir_override,
output_filename_override=output_filename_override
)
except Exception:
raise
return executor
def _process_args(args):
"""
Function to process the arguments for the sequence subcommand
:param args: Parsed Arguments from parser
"""
return render_queue_asset(
args.queue,
only_load=args.load,
shots=args.shots,
jobs=args.jobs,
all_shots=args.all_shots,
is_remote=args.remote,
is_cmdline=args.cmdline,
user=args.user,
remote_batch_name=args.batch_name,
remote_job_preset=args.deadline_job_preset,
output_dir_override=args.output_override,
output_filename_override=args.filename_override
)
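# Illustrative usage sketch (the module and asset/job names below are
# placeholders, not from the original source):
#
#   from <this_package>.<this_module> import render_queue_asset
#   executor = render_queue_asset("MyMovieQueue", jobs=["Job_A"])
#
# This loads the named queue asset, optionally trims it to the given jobs via
# update_queue(), and kicks off the render through render_jobs().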
|
# -*- coding: utf-8 -*-
import unreal
def do_some_things(*args, **kwargs):
unreal.log("do_some_things start:")
for arg in args:
unreal.log(arg)
unreal.log("do_some_things end.")
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
""" Example script for instantiating an asset, cooking it and baking an
individual output object.
"""
_g_wrapper = None
def get_test_hda_path():
return '/project/.pig_head_subdivider_v01'
def get_test_hda():
return unreal.load_object(None, get_test_hda_path())
def on_post_process(in_wrapper):
print('on_post_process')
# Print details about the outputs and record the first static mesh we find
sm_index = None
sm_identifier = None
# in_wrapper.on_post_processing_delegate.remove_callable(on_post_process)
num_outputs = in_wrapper.get_num_outputs()
print('num_outputs: {}'.format(num_outputs))
if num_outputs > 0:
for output_idx in range(num_outputs):
identifiers = in_wrapper.get_output_identifiers_at(output_idx)
output_type = in_wrapper.get_output_type_at(output_idx)
print('\toutput index: {}'.format(output_idx))
print('\toutput type: {}'.format(output_type))
print('\tnum_output_objects: {}'.format(len(identifiers)))
if identifiers:
for identifier in identifiers:
output_object = in_wrapper.get_output_object_at(output_idx, identifier)
output_component = in_wrapper.get_output_component_at(output_idx, identifier)
is_proxy = in_wrapper.is_output_current_proxy_at(output_idx, identifier)
print('\t\tidentifier: {}'.format(identifier))
print('\t\toutput_object: {}'.format(output_object.get_name() if output_object else 'None'))
print('\t\toutput_component: {}'.format(output_component.get_name() if output_component else 'None'))
print('\t\tis_proxy: {}'.format(is_proxy))
print('')
if (output_type == unreal.HoudiniOutputType.MESH and
isinstance(output_object, unreal.StaticMesh)):
sm_index = output_idx
sm_identifier = identifier
# Bake the first static mesh we found to the Content Browser
if sm_index is not None and sm_identifier is not None:
print('baking {}'.format(sm_identifier))
success = in_wrapper.bake_output_object_at(sm_index, sm_identifier)
print('success' if success else 'failed')
# Delete the instantiated asset
in_wrapper.delete_instantiated_asset()
global _g_wrapper
_g_wrapper = None
def run():
# get the API singleton
api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
global _g_wrapper
# instantiate an asset with auto-cook enabled
_g_wrapper = api.instantiate_asset(get_test_hda(), unreal.Transform())
# Bind to the on post processing delegate (after a cook and after all
# outputs have been generated in Unreal)
_g_wrapper.on_post_processing_delegate.add_callable(on_post_process)
if __name__ == '__main__':
run()
|
# Copyright Epic Games, Inc. All Rights Reserved.
import os
import unreal
import flow.cmd
from peafour import P4
import subprocess as sp
from pathlib import Path
#-------------------------------------------------------------------------------
class _Collector(object):
def __init__(self):
self._dirs = []
self._careful_dirs = []
self._pinned_files = set()
def get_pinned_count(self):
return len(self._pinned_files)
def add_dir(self, path, *, remove_carefully=False):
(self._careful_dirs if remove_carefully else self._dirs).append(path)
def pin_file(self, path):
path = str(path.resolve())
self._pinned_files.add(path.lower())
def read_careful_dirs(self):
return (x for x in self._careful_dirs)
def dispatch(self, actuator):
actuator.begin()
for dir in self._dirs:
actuator.remove_dir(dir)
for dir in self._careful_dirs:
for item in (x for x in dir.rglob("*") if x.is_file()):
key = str(item.resolve()).lower()
if key not in self._pinned_files:
actuator.remove_file(item)
actuator.end()
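# The collector only gathers directories, files and pinned paths; the removal
# strategy is supplied by the "actuator" passed to dispatch() (_DryRun below
# only counts files and sizes, _Cleaner actually moves and deletes them).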
#-------------------------------------------------------------------------------
class _DryRun(object):
def _spam(self):
mb_size = format(self._bytes // 1024 // 1024, ",") + "MB"
print("%d files to remove (%s)" % (self._count, mb_size), end="\r")
def begin(self):
self._bytes = 0
self._count = 0
self._breakdown = {}
def remove_dir(self, path):
for item in (x for x in path.rglob("*") if x.is_file()):
self.remove_file(item)
def remove_file(self, path):
self._bytes += path.stat().st_size
self._count += 1
if self._count % 493:
self._spam()
def end(self):
self._spam()
print()
#-------------------------------------------------------------------------------
class _Cleaner(object):
def __init__(self, work_dir):
self._work_dir = work_dir
def __del__(self):
if self._dir_count:
print(f"Launching background rmdir for {self._rubbish_dir}")
else:
print("Skipping background rmdir. No major directories to remove")
if os.name == "nt":
sp.Popen(
("cmd.exe", "/c", "rd", "/project/", str(self._rubbish_dir)),
stdout=sp.DEVNULL,
stderr=sp.DEVNULL
)
pass
else:
if self._rubbish_dir.is_dir() and self._rubbish_dir != "/": # paranoia!
sp.Popen(
("rm", "-rf", str(self._rubbish_dir)),
stdout=sp.DEVNULL,
stderr=sp.DEVNULL)
def _spam(self):
print(f"Moved {self._dir_count} directories, {self._file_count} files removed", end="\r", flush=True)
def begin(self):
rubbish_dir = self._work_dir / ".ushell_clean"
rubbish_dir.mkdir(parents=True, exist_ok=True)
self._rubbish_dir = rubbish_dir
self._dir_count = 0
self._file_count = 0
print(f"Moving directories to {self._rubbish_dir.name} and removing unversioned files")
def remove_dir(self, path):
dest_name = "%08x_%016x_%s_%s" % (os.getpid(), id(path), path.parent.name, path.name)
try:
path.rename(str(self._rubbish_dir / dest_name))
self._dir_count += 1
self._spam()
except OSError as e:
print("WARNING:", e)
def remove_file(self, item):
item.chmod(0o666)
item.unlink()
self._file_count += 1
self._spam()
def end(self):
if self._file_count or self._dir_count:
print()
#-------------------------------------------------------------------------------
class Clean(flow.cmd.Cmd):
""" Cleans intermediate and temporary files from an Unreal Engine branch. The
following sub-directories of .uproject, .uplugin, and Engine/ are cleaned up;
Intermediate - Removed entirely
DerivedDataCache - Removed entirely
Binaries - Unversioned files are removed
Saved - All sub-directories except --savedkeeps=... (see below)
Only a subset of sub-directories of Saved/ are removed. Any directories that
match --savedkeeps's comma-separated list are not removed. For example, to
clean everything except Saved/StagedBuilds/ and Saved/Profiling/;
.p4 clean --savedkeeps=StagedBuilds,Profiling
"StagedBuilds,Profiling" is the default value for --savedkeeps. If --allsaved
is given then all of Saved/ will be removed.
Note that the removal happens in two stages. First, directories are moved into
.ushell_clean/ in the root of the branch. This directory is then removed in
the background after `.p4 clean` exits."""
dryrun = flow.cmd.Opt(False, "Do nothing except reports statistics")
allsaved = flow.cmd.Opt(False, "Completely clean Saved/ directories")
savedkeeps = flow.cmd.Opt("Profiling,StagedBuilds", "Comma-separated list of Saved/ sub-directories to keep")
def _append_saved(self, collector, saved_dir):
if self.args.allsaved:
collector.add_dir(saved_dir)
return
for sub_dir in (x for x in saved_dir.glob("*") if x.is_dir()):
if sub_dir.name.lower() not in self._saved_keeps:
collector.add_dir(sub_dir)
def _detect_locked_files(self, root_dir):
proc = sp.Popen(
("wmic.exe", "process", "get", "executablepath"),
stdout=sp.PIPE, stderr=sp.DEVNULL
)
ret = False
for line in proc.stdout.readlines():
line = line.strip().decode()
if line and Path(line).parent.is_relative_to(root_dir):
ret = line
break
proc.stdout.close()
proc.wait()
return ret
def main(self):
self._saved_keeps = {x.strip().lower() for x in self.args.savedkeeps.split(",")}
ue_context = unreal.Context(os.getcwd())
branch = ue_context.get_branch(must_exist=True)
engine_dir = ue_context.get_engine().get_dir()
if os.name == "nt":
self.print_info("Checking running processes")
if running_exe := self._detect_locked_files(branch.get_dir()):
raise RuntimeError(f"Not cleaning because '{running_exe}' is running")
return False
self.print_info("Finding directories and files to clean")
root_dirs = [
engine_dir,
*(x for x in engine_dir.glob("Programs/*") if x.is_dir()),
]
print("Enumerating...", end="")
rg_args = (
"rg",
"--files",
"--path-separator=/",
"--no-ignore",
"-g*.uplugin",
"-g*.uproject",
str(branch.get_dir()),
)
rg = sp.Popen(rg_args, stdout=sp.PIPE, stderr=sp.DEVNULL)
for line in rg.stdout.readlines():
path = Path(line.decode().rstrip()).parent
root_dirs.append(path)
rg.wait()
print("\r", len(root_dirs), " uproject/uplugin roots found", sep="")
collector = _Collector()
clean_handlers = {
"Intermediate" : lambda x,y: x.add_dir(y),
"DerivedDataCache" : lambda x,y: x.add_dir(y),
"Binaries" : lambda x,y: x.add_dir(y, remove_carefully=True),
"Saved" : self._append_saved,
}
for root_dir in root_dirs:
for dir_name, clean_handler in clean_handlers.items():
candidate = root_dir / dir_name
if candidate.is_dir():
clean_handler(collector, candidate)
# Ask Perforce which files shouldn't be deleted
print("Asking Perforce what's synced...", end="", flush=True)
specs = (str(x) + "/..." for x in collector.read_careful_dirs())
p4_have = P4.have(specs)
for item in p4_have.read(on_error=False):
path = Path(item.path)
collector.pin_file(path)
print("done (", collector.get_pinned_count(), " files)", sep="")
if self.args.dryrun:
collector.dispatch(_DryRun())
return
self.print_info("Cleaning")
actuator = _Cleaner(branch.get_dir())
collector.dispatch(actuator)
#-------------------------------------------------------------------------------
class Reset(flow.cmd.Cmd):
""" Reconciles a branch to make it match the depot. Use with caution; this is
a destructive action and involves removing and rewriting files! """
thorough = flow.cmd.Opt(False, "Compare digests instead of file-modified time")
def _get_reset_paths(self):
try:
ue_context = unreal.Context(os.getcwd())
if not (branch := ue_context.get_branch()):
return None
except EnvironmentError:
return None
root_dir = branch.get_dir()
ret = {
# root_dir / "*", # too much scope to go wrong here and it's so few files
root_dir / "Template/...",
root_dir / "Engine/...",
}
for uproj_path in branch.read_projects():
ret.add(uproj_path.parent.absolute() / "...")
return ret
def main(self):
# Confirm the user wants to really do a reset
self.print_warning("Destructive action!")
if self.is_interactive():
while True:
c = input("Are you sure you want to continue [yn] ?")
if c.lower() == "y": break
if c.lower() == "n": return False
# Run the reconcile
args = ("reconcile", "-wade",)
if not self.args.thorough:
args = (*args, "--modtime")
if reset_paths := self._get_reset_paths():
args = (*args, *(str(x) for x in reset_paths))
exec_context = self.get_exec_context()
cmd = exec_context.create_runnable("p4", *args)
return cmd.run()
|
import os
import sys
import inspect
import logging
import traceback
from functools import wraps
import pyblish.api
from . import ipc, settings, _state
from .vendor.six.moves import queue
from .vendor import six
if six.PY2:
class __FullArgSpec(object):
def __init__(self, func):
spec = inspect.getargspec(func)
self.args = spec.args
self.varargs = spec.varargs
self.varkw = spec.keywords
self.defaults = spec.defaults
self.kwonlyargs = []
self.kwonlydefaults = None
self.annotations = {}
get_arg_spec = __FullArgSpec
else:
get_arg_spec = inspect.getfullargspec
MODULE_DIR = os.path.dirname(__file__)
SPLASH_PATH = os.path.join(MODULE_DIR, "splash.png")
log = logging.getLogger(__name__)
def register_dispatch_wrapper(wrapper):
"""Register a dispatch wrapper for servers
The wrapper must have this exact signature:
(func, *args, **kwargs)
"""
signature = get_arg_spec(wrapper)
if any([len(signature.args) != 1,
signature.varargs is None,
signature.varkw is None]):
raise TypeError("Wrapper signature mismatch")
def _wrapper(func, *args, **kwargs):
"""Exception handling"""
try:
return wrapper(func, *args, **kwargs)
except Exception as e:
# Kill subprocess
_state["currentServer"].stop()
traceback.print_exc()
raise e
_state["dispatchWrapper"] = _wrapper
def deregister_dispatch_wrapper():
_state.pop("dispatchWrapper")
def dispatch_wrapper():
return _state.get("dispatchWrapper")
def current_server():
return _state.get("currentServer")
def install(modal):
"""Perform first time install"""
if _state.get("installed"):
sys.stdout.write("Already installed, uninstalling..\n")
uninstall()
use_threaded_wrapper = not modal
install_callbacks()
install_host(use_threaded_wrapper)
_state["installed"] = True
def uninstall():
"""Clean up traces of Pyblish QML"""
uninstall_callbacks()
sys.stdout.write("Pyblish QML shutdown successful.\n")
def show(parent=None,
targets=None,
modal=None,
auto_publish=False,
auto_validate=False):
"""Attempt to show GUI
Requires install() to have been run first, and
a live instance of Pyblish QML in the background.
Arguments:
parent (None, optional): Deprecated
targets (list, optional): Publishing targets
modal (bool, optional): Block interactions to parent
"""
# Get modal mode from environment
if modal is None:
modal = bool(os.environ.get("PYBLISH_QML_MODAL", False))
if not targets:
# If no targets are passed to pyblish-qml, we assume that we want the
# default target and the registered targets. This is to facilitate
# getting all plugins on pyblish_qml.show().
targets = ["default"] + pyblish.api.registered_targets()
# Automatically install if not already installed.
install(modal)
show_settings = settings.to_dict()
show_settings['autoPublish'] = auto_publish
show_settings['autoValidate'] = auto_validate
# Show existing GUI
if _state.get("currentServer"):
server = _state["currentServer"]
proxy = ipc.server.Proxy(server)
try:
# Update targets
proxy.target(targets)
proxy.show(show_settings)
return server
except IOError:
# The running instance has already been closed.
_state.pop("currentServer")
if not host.is_headless():
host.splash()
try:
service = ipc.service.Service()
server = ipc.server.Server(service, targets=targets, modal=modal)
except Exception:
# If for some reason, the GUI fails to show.
traceback.print_exc()
return host.desplash()
proxy = ipc.server.Proxy(server)
proxy.show(show_settings)
# Store reference to server for future calls
_state["currentServer"] = server
log.info("Success. QML server available as "
"pyblish_qml.api.current_server()")
server.listen()
return server
def proxy_call(func):
@wraps(func)
def proxy_call_wrapper(*args, **kwargs):
# get existing GUI
if _state.get("currentServer"):
server = _state["currentServer"]
proxy = ipc.server.Proxy(server)
try:
return func(proxy, *args, **kwargs)
except IOError:
# The running instance has already been closed.
_state.pop("currentServer")
return proxy_call_wrapper
@proxy_call
def publish(proxy):
proxy.publish()
@proxy_call
def validate(proxy):
proxy.validate()
@proxy_call
def hide(proxy):
proxy.hide()
@proxy_call
def quit(proxy):
proxy.quit()
def install_callbacks():
pyblish.api.register_callback("instanceToggled", _toggle_instance)
pyblish.api.register_callback("pluginToggled", _toggle_plugin)
def uninstall_callbacks():
pyblish.api.deregister_callback("instanceToggled", _toggle_instance)
pyblish.api.deregister_callback("pluginToggled", _toggle_plugin)
def _toggle_instance(instance, new_value, old_value):
"""Alter instance upon visually toggling it"""
instance.data["publish"] = new_value
def _toggle_plugin(plugin, new_value, old_value):
"""Alter plugin upon visually toggling it"""
plugin.active = new_value
def register_python_executable(path):
"""Expose Python executable to server
The Python executable must be compatible with the
version of PyQt5 installed or provided on the system.
"""
assert os.path.isfile(path), "Must be a file, such as python.exe"
_state["pythonExecutable"] = path
def registered_python_executable():
return _state.get("pythonExecutable")
def register_pyqt5(path):
"""Expose PyQt5 to Python
The exposed PyQt5 must be compatible with the exposed Python.
Arguments:
path (str): Absolute path to directory containing PyQt5
"""
_state["pyqt5"] = path
def install_host(use_threaded_wrapper):
"""Install required components into supported hosts
An unsupported host will still run, but may encounter issues,
especially with threading.
"""
for install in (_install_unreal,
_install_maya,
_install_houdini,
_install_nuke,
_install_nukeassist,
_install_hiero,
_install_nukestudio,
_install_blender):
try:
install(use_threaded_wrapper)
except ImportError:
pass
else:
break
SIGNALS_TO_REMOVE_EVENT_FILTER = (
"pyblishQmlClose",
"pyblishQmlCloseForced",
)
class Host(object):
def splash(self):
pass
def install(self, host):
pass
def uninstall(self):
pass
def is_headless(self):
return True
class QtHost(Host):
def __init__(self):
super(QtHost, self).__init__()
from .vendor.Qt import QtWidgets, QtCore, QtGui
self.app = QtWidgets.QApplication.instance()
self.window = None
self._state = {
"installed": False,
"splashWindow": None,
"eventFilter": None,
}
class EventFilter(QtCore.QObject):
def eventFilter(this, widget, event):
try:
func_name = {
QtCore.QEvent.Show: "rise",
QtCore.QEvent.Hide: "hide",
QtCore.QEvent.WindowActivate: "inFocus",
QtCore.QEvent.WindowDeactivate: "outFocus",
}[event.type()]
except KeyError:
return False
server = _state.get("currentServer")
if server is not None:
proxy = ipc.server.Proxy(server)
func = getattr(proxy, func_name)
try:
func()
return True
except IOError:
# The running instance has already been closed.
self.uninstall()
_state.pop("currentServer")
return False
class Splash(QtWidgets.QWidget):
"""Splash screen for loading QML via subprocess
Loading pyblish-qml may take some time, so when loading
from within an existing interpreter, such as Maya, this
splash screen can keep the user company during that time.
"""
def __init__(self, parent=None):
super(Splash, self).__init__(parent)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setWindowFlags(
QtCore.Qt.WindowStaysOnTopHint |
QtCore.Qt.FramelessWindowHint
)
pixmap = QtGui.QPixmap(SPLASH_PATH)
image = QtWidgets.QLabel()
image.setPixmap(pixmap)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(image)
label = QtWidgets.QLabel(self)
label.move(20, 170)
label.show()
self.count = 0
self.label = label
self.setStyleSheet("""
QLabel {
color: white
}
""")
loop = QtCore.QTimer()
loop.timeout.connect(self.animate)
loop.start(330)
self.loop = loop
self.animate()
self.resize(200, 200)
def animate(self):
self.label.setText("loading" + "." * self.count)
self.count = (self.count + 1) % 4
self.Splash = Splash
self.EventFilter = EventFilter
def splash(self):
window = self.Splash()
window.show()
callback = "pyblishQmlShown", self.desplash
pyblish.api.register_callback(*callback)
self._state["splashWindow"] = window
def desplash(self):
try:
self._state.pop("splashWindow").close()
except (AttributeError, RuntimeError):
# Splash already closed
pass
pyblish.api.deregister_callback("pyblishQmlShown", self.desplash)
def is_headless(self):
return (
# Maya 2017+ in standalone
not hasattr(self.app, "activeWindow") or
# Maya 2016-
not self.app.activeWindow()
)
def install(self, host):
"""Setup common to all Qt-based hosts"""
print("Installing..")
if self._state["installed"]:
return
if self.is_headless():
log.info("Headless host")
return
print("aboutToQuit..")
self.app.aboutToQuit.connect(self._on_application_quit)
if host == "Maya":
print("Maya host..")
window = {
widget.objectName(): widget
for widget in self.app.topLevelWidgets()
}["MayaWindow"]
else:
window = self.find_window()
# Install event filter
print("event filter..")
event_filter = self.EventFilter(window)
window.installEventFilter(event_filter)
for signal in SIGNALS_TO_REMOVE_EVENT_FILTER:
pyblish.api.register_callback(signal, self.uninstall)
log.info("Installed event filter")
self.window = window
self._state["installed"] = True
self._state["eventFilter"] = event_filter
def uninstall(self):
print("uninstalling..")
if not self._state["installed"]:
return
try:
print("removing eventfilter..")
self.window.removeEventFilter(self._state["eventFilter"])
except AttributeError:
pass
print("removing callbacks..")
for signal in SIGNALS_TO_REMOVE_EVENT_FILTER:
try:
pyblish.api.deregister_callback(signal, self.uninstall)
except (KeyError, ValueError):
pass
self._state["installed"] = False
log.info("The eventFilter of pyblish-qml has been removed.")
def _on_application_quit(self):
"""Automatically kill QML on host exit"""
try:
_state["currentServer"].popen.kill()
except KeyError:
# No server started
pass
except OSError:
# Already dead
pass
def find_window(self):
"""Get top window in host"""
window = self.app.activeWindow()
while True:
parent_window = window.parent()
if parent_window:
window = parent_window
else:
break
return window
def _set_host_label(host_name):
if settings.ContextLabel == settings.ContextLabelDefault:
settings.ContextLabel = host_name
if settings.WindowTitle == settings.WindowTitleDefault:
settings.WindowTitle = "Pyblish ({})".format(host_name)
def _remove_googleapiclient():
"""Check if the compatibility must be maintained
The Maya 2018 version tries to import the `http` module from
Maya2018\plug-ins\MASH\scripts\googleapiclient\http.py instead of the
module from six.py. This import conflict causes a crash in Avalon's publisher.
This is due to Autodesk adding paths to the PYTHONPATH environment variable
which contain modules instead of only packages.
"""
keyword = "googleapiclient"
# reconstruct python paths
python_paths = os.environ["PYTHONPATH"].split(os.pathsep)
paths = [path for path in python_paths if keyword not in path]
os.environ["PYTHONPATH"] = os.pathsep.join(paths)
def _common_setup(host_name, threaded_wrapper, use_threaded_wrapper):
sys.stdout.write("Setting up Pyblish QML in {}\n".format(host_name))
if use_threaded_wrapper:
register_dispatch_wrapper(threaded_wrapper)
host.uninstall()
host.install(host_name)
_set_host_label(host_name)
def _install_unreal(use_threaded_wrapper):
import unreal
def threaded_wrapper(func, *args, **kwargs):
"""must load pyblish-unreal plugin.because this plugin create executeInMainThreadWithResult func for unreal module"""
return unreal.executeInMainThreadWithResult(func, *args, **kwargs)
sys.stdout.write("Setting up Pyblish QML in Unreal\n")
_common_setup("Unreal", threaded_wrapper, use_threaded_wrapper)
def _install_maya(use_threaded_wrapper):
"""Helper function to Autodesk Maya support"""
from maya import utils, cmds
def threaded_wrapper(func, *args, **kwargs):
return utils.executeInMainThreadWithResult(
func, *args, **kwargs)
sys.stdout.write("Setting up Pyblish QML in Maya\n")
if cmds.about(version=True) == "2018":
_remove_googleapiclient()
_common_setup("Maya", threaded_wrapper, use_threaded_wrapper)
def _install_houdini(use_threaded_wrapper):
"""Helper function to SideFx Houdini support"""
import hdefereval
def threaded_wrapper(func, *args, **kwargs):
return hdefereval.executeInMainThreadWithResult(
func, *args, **kwargs)
_common_setup("Houdini", threaded_wrapper, use_threaded_wrapper)
def _install_nuke(use_threaded_wrapper):
"""Helper function to The Foundry Nuke support"""
import nuke
not_nuke_launch = (
"--hiero" in nuke.rawArgs or
"--studio" in nuke.rawArgs or
"--nukeassist" in nuke.rawArgs
)
if not_nuke_launch:
raise ImportError
def threaded_wrapper(func, *args, **kwargs):
return nuke.executeInMainThreadWithResult(
func, args, kwargs)
_common_setup("Nuke", threaded_wrapper, use_threaded_wrapper)
def _install_nukeassist(use_threaded_wrapper):
"""Helper function to The Foundry NukeAssist support"""
import nuke
if "--nukeassist" not in nuke.rawArgs:
raise ImportError
def threaded_wrapper(func, *args, **kwargs):
return nuke.executeInMainThreadWithResult(
func, args, kwargs)
_common_setup("NukeAssist", threaded_wrapper, use_threaded_wrapper)
def _install_hiero(use_threaded_wrapper):
"""Helper function to The Foundry Hiero support"""
import hiero
import nuke
if "--hiero" not in nuke.rawArgs:
raise ImportError
def threaded_wrapper(func, *args, **kwargs):
return hiero.core.executeInMainThreadWithResult(
func, args, kwargs)
_common_setup("Hiero", threaded_wrapper, use_threaded_wrapper)
def _install_nukestudio(use_threaded_wrapper):
"""Helper function to The Foundry Hiero support"""
import nuke
if "--studio" not in nuke.rawArgs:
raise ImportError
def threaded_wrapper(func, *args, **kwargs):
return nuke.executeInMainThreadWithResult(
func, args, kwargs)
_common_setup("NukeStudio", threaded_wrapper, use_threaded_wrapper)
def _install_blender(use_threaded_wrapper):
"""Blender is a special snowflake
It doesn't have a mechanism with which to call commands from a thread
other than the main thread. So what's happening below is we run a polling
command every 10 milliseconds to see whether QML has any tasks for us.
If it does, then Blender runs this command (blocking while it does it),
and passes the result back to QML when ready.
The consequence of this is that we're polling even though nothing is
    expected to arrive. The cost of polling is expected to be negligible,
    but it's worth keeping in mind and ideally optimising away, e.g. by only
    polling when the QML window is actually open.
"""
import bpy
qml_to_blender = queue.Queue()
blender_to_qml = queue.Queue()
def threaded_wrapper(func, *args, **kwargs):
qml_to_blender.put((func, args, kwargs))
return blender_to_qml.get()
class PyblishQMLOperator(bpy.types.Operator):
"""Operator which runs its self from a timer"""
bl_idname = "wm.pyblish_qml_timer"
bl_label = "Pyblish QML Timer Operator"
_timer = None
def modal(self, context, event):
if event.type == 'TIMER':
try:
func, args, kwargs = qml_to_blender.get_nowait()
except queue.Empty:
pass
else:
result = func(*args, **kwargs)
blender_to_qml.put(result)
return {'PASS_THROUGH'}
def execute(self, context):
wm = context.window_manager
            # Check the queue every 10 ms
            # The cost of checking the queue is negligible, but it
# does mean having Python execute a command all the time,
# even as the artist is working normally and is nowhere
# near publishing anything.
self._timer = wm.event_timer_add(0.01, window=context.window)
wm.modal_handler_add(self)
return {'RUNNING_MODAL'}
def cancel(self, context):
wm = context.window_manager
wm.event_timer_remove(self._timer)
# Quit the Pyblish QML GUI, else it will be unresponsive
server = _state.get("currentServer")
if server:
proxy = ipc.server.Proxy(server)
proxy.quit()
log.info("Registering Blender + Pyblish operator")
bpy.utils.register_class(PyblishQMLOperator)
# Start the timer
bpy.ops.wm.pyblish_qml_timer()
# Expose externally, for debugging. It enables you to
# pause the timer, and add/remove commands by hand.
_state["QmlToBlenderQueue"] = qml_to_blender
_state["BlenderToQmlQueue"] = blender_to_qml
_common_setup("Blender", threaded_wrapper, use_threaded_wrapper)
# Support both Qt and non-Qt host
try:
host = QtHost()
except ImportError:
log.info("Non-Qt host found")
host = Host()
|
# Copyright Epic Games, Inc. All Rights Reserved.
import re
import unreal
import flow.cmd
import unrealcmd
import uelogprinter
import unreal.cmdline
#-------------------------------------------------------------------------------
class _CookPrettyPrinter(uelogprinter.Printer):
def __init__(self):
super().__init__()
self._cook_re = re.compile(r"ages (\d+).+otal (\d+)")
def _print(self, line):
m = self._cook_re.search(line)
if m:
percent = (int(m.group(1)) * 100) // max(int(m.group(2)), 1)
line += f" [{percent}%]"
super()._print(line)
#-------------------------------------------------------------------------------
class _Cook(unrealcmd.MultiPlatformCmd):
cookargs = unrealcmd.Arg([str], "Additional arguments passed to the cook commandlet")
cultures = unrealcmd.Opt("en", "Cultures to cook (comma separated, defaults to 'en')")
onthefly = unrealcmd.Opt(False, "Launch as an on-the-fly server")
iterate = unrealcmd.Opt(False, "Cook iteratively on top of the previous cook")
noxge = unrealcmd.Opt(False, "Disable XGE-based shader compilation")
unpretty = unrealcmd.Opt(False, "Turns off colourful pretty-printing")
attach = unrealcmd.Opt(False, "Attach a debugger to the cook")
debug = unrealcmd.Opt(False, "Use debug executables")
@unrealcmd.Cmd.summarise
def _cook(self, cook_form, exec_context=None):
ue_context = self.get_unreal_context()
# Check there is a valid project
project = ue_context.get_project()
if not project:
self.print_error("An active project is required to cook")
return False
# Prepare arguments
cultures = self.args.cultures.replace(",", "+")
commandlet_args = (
"-targetplatform=" + cook_form,
"-cookcultures=" + cultures if cultures else None,
"-unattended",
"-unversioned",
"-stdout",
"-cookonthefly" if self.args.onthefly else None,
"-iterate" if self.args.iterate else None,
"-noxgeshadercompile" if self.args.noxge else None,
*unreal.cmdline.read_ueified(*self.args.cookargs),
)
commandlet_args = tuple(x for x in commandlet_args if x)
# To support attaching a debugger we will reuse '.run commandlet'. This
# approach is not used normally as it interferes with pretty-printing and
# creates an awful lot of child processes.
if self.args.attach:
args = ("_run", "commandlet", "cook", "--noplatforms", "--attach")
if self.args.debug:
args = (*args, "debug")
args = (*args, "--", *commandlet_args)
# Disable our handling of Ctrl-C
import signal
def nop_handler(*args): pass
signal.signal(signal.SIGINT, nop_handler)
import subprocess
return subprocess.run(args).returncode
# Find the editor binary
target = ue_context.get_target_by_type(unreal.TargetType.EDITOR)
variant = unreal.Variant.DEBUG if self.args.debug else unreal.Variant.DEVELOPMENT
build = target.get_build(variant=variant)
if not build:
self.print_error(f"No {variant.name.lower()} editor build found")
return False
cmd = str(build.get_binary_path())
if not cmd.endswith("-Cmd.exe"):
cmd = cmd.replace(".exe", "-Cmd.exe")
# Launch
args = (
project.get_path(),
"-run=cook",
*commandlet_args,
)
exec_context = exec_context or self.get_exec_context()
cmd = exec_context.create_runnable(cmd, *args)
if self.args.unpretty or not self.is_interactive():
cmd.run()
else:
printer = _CookPrettyPrinter()
printer.run(cmd)
return cmd.get_return_code()
#-------------------------------------------------------------------------------
class Cook(_Cook):
""" Prepares a cooked meal of dataset for the given target platform """
    target = unrealcmd.Arg(str, "Full target platform name (e.g. WindowsNoEditor)")
def main(self):
return self._cook(self.args.target)
#-------------------------------------------------------------------------------
class _Runtime(_Cook):
platform = unrealcmd.Arg("", "The platform to cook data for")
def main(self, target):
platform = self.args.platform
if not platform:
platform = unreal.Platform.get_host()
platform = self.get_platform(platform)
exec_context = super().get_exec_context()
exec_env = exec_context.get_env()
# Establish target platform's environment
name = platform.get_name()
self.print_info(name, "-", platform.get_version())
for item in platform.read_env():
try:
dir = str(item)
exec_env[item.key] = dir
except EnvironmentError:
dir = flow.cmd.text.red(item.get())
print(item.key, "=", dir)
cook_form = platform.get_cook_form(target.name.lower())
return self._cook(cook_form, exec_context)
#-------------------------------------------------------------------------------
class Game(_Runtime):
""" Cooks game data for a particular platform """
def main(self):
return super().main(unreal.TargetType.GAME)
#-------------------------------------------------------------------------------
class Client(_Runtime):
""" Cooks client data for a particular platform """
def main(self):
return super().main(unreal.TargetType.CLIENT)
#-------------------------------------------------------------------------------
class Server(_Runtime):
""" Cooks server data for a particular platform """
complete_platform = ("win64", "linux")
def main(self):
return super().main(unreal.TargetType.SERVER)
|
# Copyright Epic Games, Inc. All Rights Reserved.
import os
import re
import shutil
import unreal
#-------------------------------------------------------------------------------
class Platform(unreal.Platform):
name = "Android"
def _read_env(self):
version = self.get_version()
prefix = f"Android/{version}/"
env_vars = {}
# sdk
env_vars["ANDROID_HOME"] = "android-sdk-windows/" if version == "-22" else ""
# jdk
if version <= "-22":
env_vars["JAVA_HOME"] = "jdk1.8.0_77/"
elif version <= "-25":
env_vars["JAVA_HOME"] = "jre/"
else:
env_vars["JAVA_HOME"] = "jbr/"
# ndk
if version <= "-22":
env_vars["NDKROOT"] = "android-ndk-r14b/"
else:
if sdks_root := os.getenv("UE_SDKS_ROOT"):
from pathlib import Path
ndk_dir = Path(sdks_root)
ndk_dir /= "Host" + unreal.Platform.get_host()
ndk_dir /= prefix
ndk_dir /= "ndk"
if ndk_dir.is_dir():
ndk_ver = max(x.name for x in ndk_dir.glob("*") if x.is_dir())
env_vars["NDKROOT"] = "ndk/" + ndk_ver
env_vars.setdefault("NDKROOT", "ndk/")
# dispatch
for env_var, template in env_vars.items():
value = os.getenv(env_var)
if not value:
value = prefix + template
yield env_var, value
def _get_version_ue4(self):
dot_cs = self.get_unreal_context().get_engine().get_dir()
dot_cs /= "Source/project/.cs"
try:
import re
with open(dot_cs, "rt") as cs_in:
cs_in.read(8192)
lines = iter(cs_in)
next(x for x in lines if "override string GetRequiredSDKString()" in x)
next(lines) # {
for i in range(5):
line = next(lines)
if m := re.search(r'return "(-\d+)"', line):
return m.group(1)
except (StopIteration, FileNotFoundError):
pass
def _get_version_ue5(self):
func = "GetAutoSDKDirectoryForMainVersion"
dot_cs = "Source/project/"
version = self._get_version_helper_ue5(dot_cs + ".Versions.cs", func)
return version or self._get_version_helper_ue5(dot_cs + ".cs", func)
def _get_android_home(self):
out = next((v for k,v in self._read_env() if k == "ANDROID_HOME"), None)
if not out:
return
for prefix in ("", unreal.Platform.get_sdks_dir()):
if os.path.isdir(prefix + out):
return prefix + out
def _get_adb(self):
home_dir = self._get_android_home()
return home_dir + "/platform-tools/adb" if home_dir else "adb"
def _get_aapt(self):
home_dir = self._get_android_home()
if not home_dir:
return "aapt"
try:
build_tools_dir = home_dir + "/build-tools/"
latest = ""
for item in os.scandir(build_tools_dir):
latest = max(latest, item.name)
if latest and os.path.isdir(build_tools_dir):
return build_tools_dir + latest + "/aapt"
except FileNotFoundError:
return
def _get_cook_form(self, target):
if target == "game": return "Android_ASTC"
if target == "client": return "Android_ASTCClient"
|
import unreal
ar_asset_lists = unreal.EditorUtilityLibrary.get_selected_assets()
ar_selected_0 = ar_asset_lists[0]
for each in ar_asset_lists:
    import_data_selected = each.get_editor_property("asset_import_data")
    # Read the current sample-rate settings before overriding them
    frame_rate_data = import_data_selected.get_editor_property("custom_sample_rate")
    using_default_sample_rate = import_data_selected.get_editor_property("use_default_sample_rate")
    # Force a custom sample rate of 60 on every selected asset
    import_data_selected.set_editor_property("use_default_sample_rate", False)
    import_data_selected.set_editor_property("custom_sample_rate", 60)
|
# Copyright 2018 Epic Games, Inc.
import unreal
import os
import re
import sys
"""
Functions to import FBX into Unreal
"""
def _sanitize_name(name):
# Remove the default Shotgun versioning number if found (of the form '.v001')
name_no_version = re.sub(r'.v[0-9]{3}', '', name)
# Replace any remaining '.' with '_' since they are not allowed in Unreal asset names
return name_no_version.replace('.', '_')
def _generate_fbx_import_task(filename, destination_path, destination_name=None, replace_existing=True,
automated=True, save=True, materials=True,
textures=True, as_skeletal=False):
"""
Create and configure an Unreal AssetImportTask
:param filename: The fbx file to import
:param destination_path: The Content Browser path where the asset will be placed
:return the configured AssetImportTask
"""
task = unreal.AssetImportTask()
task.filename = filename
task.destination_path = destination_path
# By default, destination_name is the filename without the extension
if destination_name is not None:
task.destination_name = destination_name
task.replace_existing = replace_existing
task.automated = automated
task.save = save
task.options = unreal.FbxImportUI()
task.options.import_materials = materials
task.options.import_textures = textures
task.options.import_as_skeletal = as_skeletal
task.options.static_mesh_import_data.combine_meshes = True
task.options.mesh_type_to_import = unreal.FBXImportType.FBXIT_STATIC_MESH
if as_skeletal:
task.options.mesh_type_to_import = unreal.FBXImportType.FBXIT_SKELETAL_MESH
return task
def main(argv):
tasks = []
tasks.append(_generate_fbx_import_task(*argv))
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks(tasks)
unreal.EditorLoadingAndSavingUtils.save_dirty_packages(False, True)
if __name__ == "__main__":
# Script arguments must be, in order:
# Path to FBX to import
# Unreal content browser path where to store the imported asset
main(sys.argv[1:])
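# Illustrative usage (not part of the original tool): the FBX path and Content
# Browser destination below are placeholders. Calling main() from the editor's
# Python console mirrors the positional command-line arguments documented above.
def _example_import():
    main([
        "C:/temp/ExampleMesh.fbx",  # placeholder path to an FBX on disk
        "/Game/Imported",           # placeholder Content Browser destination
        "SM_ExampleMesh",           # optional destination asset name
    ])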
|
import unreal
from Components.toaster import show_toast # Import the toast function
def group_actors_by_type():
"""Groups all actors in the level by type and moves them into respective folders."""
editor_actor_subsystem = unreal.get_editor_subsystem(unreal.EditorActorSubsystem)
all_actors = editor_actor_subsystem.get_all_level_actors()
actors_by_type = {}
light_actor_types = [
"DirectionalLight",
"SkyAtmosphere",
"SkyLight",
"VolumetricFog",
"ExponentialHeightFog",
"AtmosphericFog",
]
for actor in all_actors:
actor_class = actor.get_class().get_name()
if actor_class in light_actor_types:
actor_class = "Lights"
if actor_class not in actors_by_type:
actors_by_type[actor_class] = []
actors_by_type[actor_class].append(actor)
for actor_type, actors in actors_by_type.items():
folder_path = actor_type
for actor in actors:
actor.set_folder_path(folder_path)
message = f"✅ Grouped {len(all_actors)} actors into {len(actors_by_type)} folders."
print(message)
show_toast(message) # Call the toast function
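# Hedged usage sketch: when this file is run directly in the editor's Python
# environment (rather than imported by a menu entry), group the current level
# immediately. Assumes Components.toaster is importable from the Python path.
if __name__ == "__main__":
    group_actors_by_type()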
|
import unreal
blueprint, = unreal.EditorUtilityLibrary.get_selected_assets()
path = "%s_C" % blueprint.get_path_name()
bp_gc = unreal.load_object(None, path)
bp_cdo = unreal.get_default_object(bp_gc)
bp_cdo.set_editor_property("default_material",False)
bp_cdo.set_editor_property("default_out_line",False)
bp_cdo.set_editor_property("default_face_material",False)
bp_cdo.set_editor_property("default_face_outline_material",False)
bp_cdo.set_editor_property("enable_self_shadow",True)
|
#from pyautogui import*
#import pyautogui
import time
#import keyboard
import random
#import win32api, win32con
import unreal
# get_selected_assets
@unreal.uclass()
class MyEditorUtility(unreal.GlobalEditorUtilityBase):
pass
#pockets
selectedAssets = MyEditorUtility().get_selected_assets()
for asset in selectedAssets:
    unreal.log("type() : " + asset.get_class().get_name())
|
# -*- coding: utf-8 -*-
"""
Batch property transfer tool
- [x] TreeView
- [x] Overlay component
- [x] Filtering hooked up
- [x] Color-coded property status for selected assets
- [x] Right-click context menu
- [ ] ~Export/import of checked properties~
- [ ] ~Blueprint property copying~
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
__author__ = "timmyliang"
__email__ = "[email protected]"
__date__ = "2021-04-01 20:12:44"
import os
import sys
import webbrowser
from collections import defaultdict
from functools import partial
import unreal
from QBinder import BinderTemplate
from Qt import QtCore, QtWidgets, QtGui
from Qt.QtCompat import load_ui
from ue_util import error_log, toast
from dayu_widgets.item_model import MTableModel, MSortFilterModel
level_lib = unreal.EditorLevelLibrary()
util_lib = unreal.EditorUtilityLibrary()
asset_lib = unreal.EditorAssetLibrary()
sys_lib = unreal.SystemLibrary()
mat_lib = unreal.MaterialEditingLibrary()
py_lib = unreal.PyToolkitBPLibrary()
DIR = os.path.dirname(__file__)
def cast(typ, obj):
try:
return getattr(unreal, typ).cast(obj)
except:
return None
@error_log
class MaterialTransfer(object):
def _get_material_paramters(self, expressions):
"""
        inspired by https://github.com/project/
"""
paramters = defaultdict(set)
for expresion in expressions:
func = cast("MaterialExpressionMaterialFunctionCall", expresion)
if func:
func = func.get_editor_property("material_function")
expressions = py_lib.get_material_function_expressions(func)
params = self._get_material_paramters(expressions)
for group, param in params.items():
for p in param:
paramters[str(group)].add(str(p))
continue
param = cast("MaterialExpressionParameter", expresion)
if not param:
param = cast("MaterialExpressionTextureSampleParameter", expresion)
if not param:
param = cast("MaterialExpressionFontSampleParameter", expresion)
if param:
group = param.get_editor_property("group")
parameter_name = param.get_editor_property("parameter_name")
paramters[str(group)].add(str(parameter_name))
return paramters
def get_material_property(self, material):
scalars = mat_lib.get_scalar_parameter_names(material)
vectors = mat_lib.get_vector_parameter_names(material)
switches = mat_lib.get_static_switch_parameter_names(material)
textures = mat_lib.get_texture_parameter_names(material)
parent_material = material.get_base_material()
expressions = py_lib.get_material_expressions(parent_material)
paramters = self._get_material_paramters(expressions)
collections = defaultdict(dict)
for grp, params in sorted(paramters.items()):
for p in sorted(params):
value = None
if p in textures:
func = mat_lib.get_material_instance_texture_parameter_value
value = func(material, p)
value = value.get_path_name()
elif p in switches:
value = mat_lib.get_material_instance_static_switch_parameter_value(
material, p
)
elif p in vectors:
func = mat_lib.get_material_instance_vector_parameter_value
value = func(material, p)
value = "|".join(
[
"%s : %-10s" % (c.upper(), round(getattr(value, c), 2))
for c in "rgba"
]
)
elif p in scalars:
func = mat_lib.get_material_instance_scalar_parameter_value
value = func(material, p)
collections[grp][p] = value
return collections
def update_material_property(self, material):
scalars = mat_lib.get_scalar_parameter_names(material)
vectors = mat_lib.get_vector_parameter_names(material)
switches = mat_lib.get_static_switch_parameter_names(material)
textures = mat_lib.get_texture_parameter_names(material)
all_params = []
all_params.extend(scalars)
all_params.extend(vectors)
all_params.extend(switches)
all_params.extend(textures)
property_list = self.property_model.get_data_list()
for group in property_list:
green_list = []
red_list = []
children = group.get("children")
children_count = len(children)
for prop in children:
prop["bg_color"] = QtGui.QColor("transparent")
if not prop.get("property_checked"):
continue
attr = prop.get("property")
if attr in all_params:
green_list.append(attr)
prop["bg_color"] = QtGui.QColor("green")
else:
red_list.append(attr)
prop["bg_color"] = QtGui.QColor("red")
if len(green_list) == children_count:
group["bg_color"] = QtGui.QColor("green")
elif len(red_list) == children_count:
group["bg_color"] = QtGui.QColor("red")
elif not green_list and not red_list:
group["bg_color"] = QtGui.QColor("transparent")
else:
group["bg_color"] = QtGui.QColor("yellowgreen")
        # NOTE refresh the UI
QtCore.QTimer.singleShot(0, self.repaint)
def transfer_material_property(self, material, property_list):
scalars = mat_lib.get_scalar_parameter_names(material)
vectors = mat_lib.get_vector_parameter_names(material)
switches = mat_lib.get_static_switch_parameter_names(material)
textures = mat_lib.get_texture_parameter_names(material)
for p in property_list:
if p in textures:
getter = mat_lib.get_material_instance_texture_parameter_value
setter = mat_lib.set_material_instance_texture_parameter_value
setter(material, p, getter(self.source, p))
elif p in switches:
getter = mat_lib.get_material_instance_static_switch_parameter_value
setter = py_lib.set_material_instance_static_switch_parameter_value
setter(material, p, getter(self.source, p))
elif p in vectors:
getter = mat_lib.get_material_instance_vector_parameter_value
setter = mat_lib.set_material_instance_vector_parameter_value
setter(material, p, getter(self.source, p))
elif p in scalars:
getter = mat_lib.get_material_instance_scalar_parameter_value
setter = mat_lib.set_material_instance_scalar_parameter_value
setter(material, p, getter(self.source, p))
@error_log
class AssetList(object):
def locate_file_location(self):
data_list = self.asset_model.get_data_list()
index = self.Asset_List.selectionModel().currentIndex()
if not index or not data_list:
toast(u"没有元素可定位")
return
asset = data_list[index.row()]["asset"]
path = asset.get_path_name()
# path = os.path.splitext(path)[0]
project = sys_lib.get_project_content_directory()
path = path.replace("/Game/", project)
path = os.path.dirname(path)
if os.path.exists(path):
os.startfile(path)
else:
toast(u"当前路径不存在")
def locate_asset_location(self):
data_list = self.asset_model.get_data_list()
index = self.Asset_List.selectionModel().currentIndex()
if not index or not data_list:
toast(u"没有元素可定位")
return
asset = data_list[index.row()]["asset"]
path = asset.get_path_name()
if asset_lib.does_asset_exist(path):
asset_lib.sync_browser_to_objects([path])
else:
toast(u"元素不存在")
def remove_assets(self):
data_list = self.asset_model.get_data_list()
indexes = self.Asset_List.selectionModel().selectedRows()
if not indexes or not data_list:
return
for index in sorted(indexes, reverse=True):
data_list.pop(index.row())
self.asset_model.set_data_list(data_list)
class PropertyTransferBinder(BinderTemplate):
def __init__(self):
dumper = self("dumper")
dumper.set_auto_load(False)
self.source_eanble = False
self.lable_visible = False
self.lable_text = ""
@error_log
class PropertyTransferTool(QtWidgets.QWidget, MaterialTransfer, AssetList):
state = PropertyTransferBinder()
def update_icon(self):
data = {
"Del": {
"icon": QtWidgets.QStyle.SP_BrowserStop,
"callback": self.remove_assets,
"tooltip": u"删除文件",
},
"Locate": {
"icon": QtWidgets.QStyle.SP_FileDialogContentsView,
"callback": self.locate_asset_location,
"tooltip": u"定位文件",
},
"Drive": {
"icon": QtWidgets.QStyle.SP_DriveHDIcon,
"callback": self.locate_file_location,
"tooltip": u"打开系统目录路径",
},
"Expand": {
"icon": QtWidgets.QStyle.SP_ToolBarVerticalExtensionButton,
"callback": self.Property_Tree.expandAll,
"tooltip": u"扩展全部",
},
"Collapse": {
"icon": QtWidgets.QStyle.SP_ToolBarHorizontalExtensionButton,
"callback": self.Property_Tree.collapseAll,
"tooltip": u"收缩全部",
},
"Source_Locate": {
"icon": QtWidgets.QStyle.SP_FileDialogContentsView,
"callback": lambda: asset_lib.sync_browser_to_objects(
[self.source.get_path_name()]
),
"tooltip": u"定位复制源",
},
}
widget_dict = {
"BTN": "clicked",
"Action": "triggered",
}
style = QtWidgets.QApplication.style()
icon = style.standardIcon(QtWidgets.QStyle.SP_BrowserReload)
self.setWindowIcon(icon)
for typ, info in data.items():
icon = style.standardIcon(info.get("icon"))
tooltip = info.get("tooltip", "")
tooltip = '<span style="font-weight:600;">%s</span>' % tooltip
callback = info.get("callback", lambda *args: None)
for widget, signal in widget_dict.items():
widget = "%s_%s" % (typ, widget)
if hasattr(self, widget):
widget = getattr(self, widget)
widget.setIcon(icon)
getattr(widget, signal).connect(callback)
widget.setToolTip(tooltip)
widget.setEnabled(lambda: self.state.source_eanble)
widget.setEnabled(self.state.source_eanble)
# print(widget.isEnabled())
# QtCore.QTimer.singleShot(0, lambda: self.state.source_eanble >> Set(False))
def __init__(self, parent=None):
super(PropertyTransferTool, self).__init__(parent)
DIR, file_name = os.path.split(__file__)
file_name = os.path.splitext(file_name)[0]
load_ui(os.path.join(DIR, "%s.ui" % file_name), self)
self.source = None
help_link = "http://redarttoolkit.pages.oa.com/project/.html"
self.Help_Action.triggered.connect(lambda: webbrowser.open_new_tab(help_link))
        # NOTE set up button icons
QtCore.QTimer.singleShot(0, self.update_icon)
menu_callback = lambda: self.Asset_Menu.popup(QtGui.QCursor.pos())
self.Asset_List.customContextMenuRequested.connect(menu_callback)
menu_callback = lambda: self.Property_Menu.popup(QtGui.QCursor.pos())
self.Property_Tree.customContextMenuRequested.connect(menu_callback)
self.Asset_Label.setVisible(lambda: self.state.lable_visible)
self.Asset_Label.setText(lambda: self.state.lable_text)
self.Src_BTN.clicked.connect(self.get_source)
self.Dst_BTN.clicked.connect(self.get_destination)
self.Transfer_BTN.clicked.connect(self.transfer_property)
        # NOTE configure the splitter
name = "%s.ini" % self.__class__.__name__
self.settings = QtCore.QSettings(name, QtCore.QSettings.IniFormat)
self.Splitter.splitterMoved.connect(
lambda: self.settings.setValue("splitter_size", self.Splitter.sizes())
)
splitter_size = self.settings.value("splitter_size")
size = [int(i) for i in splitter_size] if splitter_size else [700, 200]
self.Splitter.setSizes(size)
        # NOTE configure the search fields
self.Dst_Filter_LE.search()
self.Dst_Filter_LE.setPlaceholderText("")
self.Prop_Filter_LE.search()
self.Prop_Filter_LE.setPlaceholderText("")
        # NOTE configure the Property_Tree
self.property_model = MTableModel()
        columns = {u"property": "Property", u"value": "Value"}
self.property_header_list = [
{
"label": label,
"key": key,
"bg_color": lambda x, y: y.get("bg_color", QtGui.QColor("transparent")),
"checkable": i == 0,
"searchable": True,
"width": 300,
"font": lambda x, y: {"bold": True},
}
for i, (key, label) in enumerate(columns.items())
]
self.property_model.set_header_list(self.property_header_list)
self.property_model_sort = MSortFilterModel()
self.property_model_sort.set_header_list(self.property_header_list)
self.property_model_sort.setSourceModel(self.property_model)
self.Prop_Filter_LE.textChanged.connect(
self.property_model_sort.set_search_pattern
)
self.Property_Tree.setModel(self.property_model_sort)
header = self.Property_Tree.header()
header.setStretchLastSection(True)
        # NOTE configure the Asset_List
self.asset_model = MTableModel()
self.asset_header_list = [
{
"label": "destination",
"key": "destination",
"bg_color": lambda x, y: y.get("bg_color", QtGui.QColor("transparent")),
"tooltip": lambda x, y: y.get("asset").get_path_name(),
"checkable": True,
"searchable": True,
"width": 300,
"font": lambda x, y: {"bold": True},
}
]
self.asset_model.set_header_list(self.asset_header_list)
self.asset_model_sort = MSortFilterModel()
self.asset_model_sort.search_reg.setPatternSyntax(QtCore.QRegExp.RegExp)
self.asset_model_sort.set_header_list(self.asset_header_list)
self.asset_model_sort.setSourceModel(self.asset_model)
self.Dst_Filter_LE.textChanged.connect(self.asset_model_sort.set_search_pattern)
self.Asset_List.setModel(self.asset_model_sort)
self.Asset_List.selectionModel().selectionChanged.connect(
self.asset_selection_change
)
self.property_model.dataChanged.connect(self.asset_selection_change)
def get_source(self):
assets = util_lib.get_selected_assets()
if len(assets) < 1:
toast(u"请选择一个资产")
return
asset = assets[0]
self.set_property_tree(asset)
self.asset_model.set_data_list([])
self.property_model_sort.sort(0, QtCore.Qt.AscendingOrder)
def get_destination(self):
if not self.source:
toast(u"请拾取复制源")
return
data_list = self.asset_model.get_data_list()
tooltip_list = [data.get("asset").get_path_name() for data in data_list]
assets = [
asset
for asset in util_lib.get_selected_assets()
if isinstance(asset, type(self.source))
and asset.get_path_name() not in tooltip_list
]
if not assets:
toast(u"请选择匹配的资产")
return
data_list.extend(
[
{
"bg_color": QtGui.QColor("transparent"),
"asset": asset,
"destination": asset.get_name(),
                    # NOTE checked by default
"destination_checked": QtCore.Qt.Checked,
}
for asset in assets
if not asset is self.source
]
)
self.asset_model.set_data_list(data_list)
def asset_selection_change(self, *args):
model = self.Asset_List.selectionModel()
indexes = model.selectedRows()
if not indexes:
return
index = indexes[0]
data_list = self.asset_model.get_data_list()
data = data_list[index.row()]
asset = data.get("asset")
if isinstance(asset, unreal.MaterialInterface):
self.update_material_property(asset)
def set_property_tree(self, asset):
if isinstance(asset, unreal.MaterialInterface):
data = self.get_material_property(asset)
elif isinstance(asset, unreal.Blueprint):
pass
else:
toast(u"不支持资产")
return
data_list = [
{
"property": group,
"value": "",
"children": [
{
"bg_color": QtGui.QColor("transparent"),
"property": prop,
"property_checked": QtCore.Qt.Unchecked,
"value": value,
}
for prop, value in props.items()
],
}
for group, props in data.items()
]
self.property_model.set_data_list(data_list)
self.source = asset
self.state.source_eanble = True
self.state.lable_visible = True
self.state.lable_text = asset.get_name()
def transfer_property(self):
for i, asset in enumerate(self.asset_model.get_data_list()):
if not asset.get("destination_checked"):
continue
            # NOTE select the asset in the Asset_List and refresh its colors
index = self.asset_model.index(i, 0)
model = self.Asset_List.selectionModel()
model.setCurrentIndex(index, model.SelectCurrent)
property_list = [
prop.get("property")
for grp in self.property_model.get_data_list()
for prop in grp.get("children")
                # NOTE skip properties that are not checked
if prop.get("bg_color") == QtGui.QColor("green")
and prop.get("property_checked")
]
            # NOTE transfer the properties
asset = asset.get("asset")
if isinstance(asset, type(self.source)):
self.transfer_material_property(asset, property_list)
else:
                raise RuntimeError(u"%s does not match the type of the copy source" % asset.get_name())
toast("传递完成",'success')
if __name__ == "__main__":
widget = PropertyTransferTool()
widget.show()
|
import unreal
import json
from typing import Dict, Any, List, Tuple, Union, Optional
from utils import unreal_conversions as uc
from utils import logging as log
def handle_create_blueprint(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to create a new Blueprint from a specified parent class
Args:
command: The command dictionary containing:
- blueprint_name: Name for the new Blueprint
- parent_class: Parent class name or path (e.g., "Actor", "/project/.Actor")
- save_path: Path to save the Blueprint asset (e.g., "/project/")
Returns:
Response dictionary with success/failure status and the Blueprint path if successful
"""
try:
blueprint_name = command.get("blueprint_name", "NewBlueprint")
parent_class = command.get("parent_class", "Actor")
save_path = command.get("save_path", "/project/")
log.log_command("create_blueprint", f"Name: {blueprint_name}, Parent: {parent_class}")
# Call the C++ implementation
gen_bp_utils = unreal.GenBlueprintUtils
blueprint = gen_bp_utils.create_blueprint(blueprint_name, parent_class, save_path)
if blueprint:
blueprint_path = f"{save_path}/{blueprint_name}"
log.log_result("create_blueprint", True, f"Path: {blueprint_path}")
return {"success": True, "blueprint_path": blueprint_path}
else:
log.log_error(f"Failed to create Blueprint {blueprint_name}")
return {"success": False, "error": f"Failed to create Blueprint {blueprint_name}"}
except Exception as e:
log.log_error(f"Error creating blueprint: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_add_component(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to add a component to a Blueprint
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- component_class: Component class to add (e.g., "StaticMeshComponent")
- component_name: Name for the new component
Returns:
Response dictionary with success/failure status
"""
try:
blueprint_path = command.get("blueprint_path")
component_class = command.get("component_class")
component_name = command.get("component_name")
if not blueprint_path or not component_class:
log.log_error("Missing required parameters for add_component")
return {"success": False, "error": "Missing required parameters"}
log.log_command("add_component", f"Blueprint: {blueprint_path}, Component: {component_class}")
# Call the C++ implementation
gen_bp_utils = unreal.GenBlueprintUtils
success = gen_bp_utils.add_component(blueprint_path, component_class, component_name or "")
if success:
log.log_result("add_component", True, f"Added {component_class} to {blueprint_path}")
return {"success": True}
else:
log.log_error(f"Failed to add component {component_class} to {blueprint_path}")
return {"success": False, "error": f"Failed to add component {component_class} to {blueprint_path}"}
except Exception as e:
log.log_error(f"Error adding component: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_add_variable(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to add a variable to a Blueprint
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- variable_name: Name for the new variable
- variable_type: Type of the variable (e.g., "float", "vector", "boolean")
- default_value: Default value for the variable (optional)
- category: Category for organizing variables in the Blueprint editor (optional)
Returns:
Response dictionary with success/failure status
"""
try:
blueprint_path = command.get("blueprint_path")
variable_name = command.get("variable_name")
variable_type = command.get("variable_type")
default_value = command.get("default_value", "")
category = command.get("category", "Default")
if not blueprint_path or not variable_name or not variable_type:
log.log_error("Missing required parameters for add_variable")
return {"success": False, "error": "Missing required parameters"}
log.log_command("add_variable",
f"Blueprint: {blueprint_path}, Variable: {variable_name}, Type: {variable_type}")
# Call the C++ implementation
gen_bp_utils = unreal.GenBlueprintUtils
success = gen_bp_utils.add_variable(blueprint_path, variable_name, variable_type, str(default_value), category)
if success:
log.log_result("add_variable", True, f"Added {variable_type} variable {variable_name} to {blueprint_path}")
return {"success": True}
else:
log.log_error(f"Failed to add variable {variable_name} to {blueprint_path}")
return {"success": False, "error": f"Failed to add variable {variable_name} to {blueprint_path}"}
except Exception as e:
log.log_error(f"Error adding variable: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_add_function(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to add a function to a Blueprint
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- function_name: Name for the new function
- inputs: List of input parameters [{"name": "param1", "type": "float"}, ...] (optional)
- outputs: List of output parameters (optional)
Returns:
Response dictionary with success/failure status and the function ID if successful
"""
try:
blueprint_path = command.get("blueprint_path")
function_name = command.get("function_name")
inputs = command.get("inputs", [])
outputs = command.get("outputs", [])
if not blueprint_path or not function_name:
log.log_error("Missing required parameters for add_function")
return {"success": False, "error": "Missing required parameters"}
log.log_command("add_function", f"Blueprint: {blueprint_path}, Function: {function_name}")
# Convert inputs and outputs to JSON strings for C++ function
inputs_json = json.dumps(inputs)
outputs_json = json.dumps(outputs)
# Call the C++ implementation
gen_bp_utils = unreal.GenBlueprintUtils
function_id = gen_bp_utils.add_function(blueprint_path, function_name, inputs_json, outputs_json)
if function_id:
log.log_result("add_function", True,
f"Added function {function_name} to {blueprint_path} with ID: {function_id}")
return {"success": True, "function_id": function_id}
else:
log.log_error(f"Failed to add function {function_name} to {blueprint_path}")
return {"success": False, "error": f"Failed to add function {function_name} to {blueprint_path}"}
except Exception as e:
log.log_error(f"Error adding function: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_add_node(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to add any type of node to a Blueprint graph
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- function_id: ID of the function to add the node to
- node_type: Type of node to add - can be any of:
* Function name (e.g. "K2_SetActorLocation")
* Node class name (e.g. "Branch", "Sequence", "ForLoop")
* Full node class path (e.g. "K2Node_IfThenElse")
- node_position: Position of the node in the graph [X, Y]
- node_properties: Dictionary of properties to set on the node (optional)
* Can include pin values, node settings, etc.
- target_class: Optional class to use for function calls (default: "Actor")
Returns:
Response dictionary with success/failure status and the node ID if successful
"""
try:
blueprint_path = command.get("blueprint_path")
function_id = command.get("function_id")
node_type = command.get("node_type")
node_position = command.get("node_position", [0, 0])
node_properties = command.get("node_properties", {})
if not blueprint_path or not function_id or not node_type:
log.log_error("Missing required parameters for add_node")
return {"success": False, "error": "Missing required parameters"}
log.log_command("add_node", f"Blueprint: {blueprint_path}, Node: {node_type}")
# Convert node properties to JSON for C++ function
node_properties_json = json.dumps(node_properties)
# Call the C++ implementation from UGenBlueprintNodeCreator
node_creator = unreal.GenBlueprintNodeCreator
node_id = node_creator.add_node(blueprint_path, function_id, node_type,
node_position[0], node_position[1],
node_properties_json)
if node_id:
log.log_result("add_node", True, f"Added node {node_type} to {blueprint_path} with ID: {node_id}")
return {"success": True, "node_id": node_id}
else:
log.log_error(f"Failed to add node {node_type} to {blueprint_path}")
return {"success": False, "error": f"Failed to add node {node_type} to {blueprint_path}"}
except Exception as e:
log.log_error(f"Error adding node: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_connect_nodes(command: Dict[str, Any]) -> Dict[str, Any]:
try:
blueprint_path = command.get("blueprint_path")
function_id = command.get("function_id")
source_node_id = command.get("source_node_id")
source_pin = command.get("source_pin")
target_node_id = command.get("target_node_id")
target_pin = command.get("target_pin")
if not all([blueprint_path, function_id, source_node_id, source_pin, target_node_id, target_pin]):
log.log_error("Missing required parameters for connect_nodes")
return {"success": False, "error": "Missing required parameters"}
log.log_command("connect_nodes",
f"Blueprint: {blueprint_path}, {source_node_id}.{source_pin} -> {target_node_id}.{target_pin}")
gen_bp_utils = unreal.GenBlueprintUtils
result_json = gen_bp_utils.connect_nodes(blueprint_path, function_id,
source_node_id, source_pin,
target_node_id, target_pin)
result = json.loads(result_json)
if result.get("success"):
log.log_result("connect_nodes", True, f"Connected nodes in {blueprint_path}")
return {"success": True}
else:
log.log_error(f"Failed to connect nodes: {result.get('error')}")
return result # Pass through the detailed response with available pins
except Exception as e:
log.log_error(f"Error connecting nodes: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_compile_blueprint(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to compile a Blueprint
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
Returns:
Response dictionary with success/failure status
"""
try:
blueprint_path = command.get("blueprint_path")
if not blueprint_path:
log.log_error("Missing required parameters for compile_blueprint")
return {"success": False, "error": "Missing required parameters"}
log.log_command("compile_blueprint", f"Blueprint: {blueprint_path}")
# Call the C++ implementation
gen_bp_utils = unreal.GenBlueprintUtils
success = gen_bp_utils.compile_blueprint(blueprint_path)
if success:
log.log_result("compile_blueprint", True, f"Compiled blueprint: {blueprint_path}")
return {"success": True}
else:
log.log_error(f"Failed to compile blueprint: {blueprint_path}")
return {"success": False, "error": f"Failed to compile blueprint: {blueprint_path}"}
except Exception as e:
log.log_error(f"Error compiling blueprint: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_spawn_blueprint(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to spawn a Blueprint actor in the level
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- location: [X, Y, Z] coordinates (optional)
- rotation: [Pitch, Yaw, Roll] in degrees (optional)
- scale: [X, Y, Z] scale factors (optional)
- actor_label: Optional custom name for the actor
Returns:
Response dictionary with success/failure status and the actor name if successful
"""
try:
blueprint_path = command.get("blueprint_path")
location = command.get("location", (0, 0, 0))
rotation = command.get("rotation", (0, 0, 0))
scale = command.get("scale", (1, 1, 1))
actor_label = command.get("actor_label", "")
if not blueprint_path:
log.log_error("Missing required parameters for spawn_blueprint")
return {"success": False, "error": "Missing required parameters"}
log.log_command("spawn_blueprint", f"Blueprint: {blueprint_path}, Label: {actor_label}")
# Convert to Unreal types
loc = uc.to_unreal_vector(location)
rot = uc.to_unreal_rotator(rotation)
scale_vec = uc.to_unreal_vector(scale)
# Call the C++ implementation
gen_bp_utils = unreal.GenBlueprintUtils
actor = gen_bp_utils.spawn_blueprint(blueprint_path, loc, rot, scale_vec, actor_label)
if actor:
actor_name = actor.get_actor_label()
log.log_result("spawn_blueprint", True, f"Spawned blueprint: {blueprint_path} as {actor_name}")
return {"success": True, "actor_name": actor_name}
else:
log.log_error(f"Failed to spawn blueprint: {blueprint_path}")
return {"success": False, "error": f"Failed to spawn blueprint: {blueprint_path}"}
except Exception as e:
log.log_error(f"Error spawning blueprint: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_add_nodes_bulk(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to add multiple nodes to a Blueprint graph in a single operation
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- function_id: ID of the function to add the nodes to
- nodes: Array of node definitions, each containing:
* id: Optional ID for referencing the node (string)
* node_type: Type of node to add (string)
* node_position: Position of the node in the graph [X, Y]
* node_properties: Properties to set on the node (optional)
Returns:
Response dictionary with success/failure status and node IDs mapped to reference IDs
"""
try:
blueprint_path = command.get("blueprint_path")
function_id = command.get("function_id")
nodes = command.get("nodes", [])
if not blueprint_path or not function_id or not nodes:
log.log_error("Missing required parameters for add_nodes_bulk")
return {"success": False, "error": "Missing required parameters"}
log.log_command("add_nodes_bulk", f"Blueprint: {blueprint_path}, Adding {len(nodes)} nodes")
# Prepare nodes in the format expected by the C++ function
nodes_json = json.dumps(nodes)
# Call the C++ implementation from UGenBlueprintNodeCreator
node_creator = unreal.GenBlueprintNodeCreator
results_json = node_creator.add_nodes_bulk(blueprint_path, function_id, nodes_json)
if results_json:
results = json.loads(results_json)
node_mapping = {}
# Create a mapping from reference IDs to actual node GUIDs
for node_result in results:
if "ref_id" in node_result:
node_mapping[node_result["ref_id"]] = node_result["node_guid"]
else:
# For nodes without a reference ID, just include the GUID
node_mapping[f"node_{len(node_mapping)}"] = node_result["node_guid"]
log.log_result("add_nodes_bulk", True, f"Added {len(results)} nodes to {blueprint_path}")
return {"success": True, "nodes": node_mapping}
else:
log.log_error(f"Failed to add nodes to {blueprint_path}")
return {"success": False, "error": f"Failed to add nodes to {blueprint_path}"}
except Exception as e:
log.log_error(f"Error adding nodes: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_connect_nodes_bulk(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to connect multiple pairs of nodes in a Blueprint graph
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- function_id: ID of the function containing the nodes
- connections: Array of connection definitions, each containing:
* source_node_id: ID of the source node
* source_pin: Name of the source pin
* target_node_id: ID of the target node
* target_pin: Name of the target pin
Returns:
Response dictionary with detailed connection results
"""
try:
blueprint_path = command.get("blueprint_path")
function_id = command.get("function_id")
connections = command.get("connections", [])
if not blueprint_path or not function_id or not connections:
log.log_error("Missing required parameters for connect_nodes_bulk")
return {"success": False, "error": "Missing required parameters"}
log.log_command("connect_nodes_bulk", f"Blueprint: {blueprint_path}, Making {len(connections)} connections")
# Convert connections list to JSON for C++ function
connections_json = json.dumps(connections)
# Call the C++ implementation - now returns a JSON string instead of boolean
gen_bp_utils = unreal.GenBlueprintUtils
result_json = gen_bp_utils.connect_nodes_bulk(blueprint_path, function_id, connections_json)
# Parse the JSON result
try:
result_data = json.loads(result_json)
log.log_result("connect_nodes_bulk", result_data.get("success", False),
f"Connected {result_data.get('successful_connections', 0)}/{result_data.get('total_connections', 0)} node pairs in {blueprint_path}")
# Return the full result data for detailed error reporting
return result_data
except json.JSONDecodeError:
log.log_error(f"Failed to parse JSON result from connect_nodes_bulk: {result_json}")
return {"success": False, "error": "Failed to parse connection results"}
except Exception as e:
log.log_error(f"Error connecting nodes: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_delete_node(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to delete a node from a Blueprint graph
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- function_id: ID of the function containing the node
- node_id: ID of the node to delete
Returns:
Response dictionary with success/failure status
"""
try:
blueprint_path = command.get("blueprint_path")
function_id = command.get("function_id")
node_id = command.get("node_id")
if not blueprint_path or not function_id or not node_id:
log.log_error("Missing required parameters for delete_node")
return {"success": False, "error": "Missing required parameters"}
log.log_command("delete_node", f"Blueprint: {blueprint_path}, Node ID: {node_id}")
# Call the C++ implementation from UGenBlueprintNodeCreator
node_creator = unreal.GenBlueprintNodeCreator
success = node_creator.delete_node(blueprint_path, function_id, node_id)
if success:
log.log_result("delete_node", True, f"Deleted node {node_id} from {blueprint_path}")
return {"success": True}
else:
log.log_error(f"Failed to delete node {node_id} from {blueprint_path}")
return {"success": False, "error": f"Failed to delete node {node_id}"}
except Exception as e:
log.log_error(f"Error deleting node: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_get_all_nodes(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to get all nodes in a Blueprint graph
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- function_id: ID of the function to get nodes from
Returns:
Response dictionary with success/failure status and a list of nodes with their details
"""
try:
blueprint_path = command.get("blueprint_path")
function_id = command.get("function_id")
if not blueprint_path or not function_id:
log.log_error("Missing required parameters for get_all_nodes")
return {"success": False, "error": "Missing required parameters"}
log.log_command("get_all_nodes", f"Blueprint: {blueprint_path}, Function ID: {function_id}")
# Call the C++ implementation from UGenBlueprintNodeCreator
node_creator = unreal.GenBlueprintNodeCreator
nodes_json = node_creator.get_all_nodes_in_graph(blueprint_path, function_id)
if nodes_json:
# Parse the JSON response
try:
nodes = json.loads(nodes_json)
log.log_result("get_all_nodes", True, f"Retrieved {len(nodes)} nodes from {blueprint_path}")
return {"success": True, "nodes": nodes}
except json.JSONDecodeError as e:
log.log_error(f"Error parsing nodes JSON: {str(e)}")
return {"success": False, "error": f"Error parsing nodes JSON: {str(e)}"}
else:
log.log_error(f"Failed to get nodes from {blueprint_path}")
return {"success": False, "error": "Failed to get nodes"}
except Exception as e:
log.log_error(f"Error getting nodes: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_get_node_suggestions(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to get suggestions for a node type in Unreal Blueprints
Args:
command: The command dictionary containing:
- node_type: The partial or full node type to get suggestions for (e.g., "Add", "FloatToDouble")
Returns:
Response dictionary with success/failure status and a list of suggested node types
"""
try:
node_type = command.get("node_type")
if not node_type:
log.log_error("Missing required parameter 'node_type' for get_node_suggestions")
return {"success": False, "error": "Missing required parameter 'node_type'"}
log.log_command("get_node_suggestions", f"Node Type: {node_type}")
# Call the C++ implementation from UGenBlueprintNodeCreator
node_creator = unreal.GenBlueprintNodeCreator
suggestions_result = node_creator.get_node_suggestions(node_type)
if suggestions_result:
if suggestions_result.startswith("SUGGESTIONS:"):
suggestions = suggestions_result[len("SUGGESTIONS:"):].split(", ")
log.log_result("get_node_suggestions", True, f"Retrieved {len(suggestions)} suggestions for {node_type}")
return {"success": True, "suggestions": suggestions}
else:
log.log_error(f"Unexpected response format from get_node_suggestions: {suggestions_result}")
return {"success": False, "error": "Unexpected response format from Unreal"}
else:
log.log_result("get_node_suggestions", False, f"No suggestions found for {node_type}")
return {"success": True, "suggestions": []} # Empty list for no matches
except Exception as e:
log.log_error(f"Error getting node suggestions: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
def handle_get_node_guid(command: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle a command to retrieve the GUID of a pre-existing node in a Blueprint graph.
Args:
command: The command dictionary containing:
- blueprint_path: Path to the Blueprint asset
- graph_type: "EventGraph" or "FunctionGraph"
- node_name: Name of the node (e.g., "BeginPlay") for EventGraph
- function_id: ID of the function for FunctionGraph to get FunctionEntry
Returns:
Response dictionary with the node's GUID or an error
"""
try:
blueprint_path = command.get("blueprint_path")
graph_type = command.get("graph_type", "EventGraph")
node_name = command.get("node_name", "")
function_id = command.get("function_id", "")
if not blueprint_path:
log.log_error("Missing blueprint_path for get_node_guid")
return {"success": False, "error": "Missing blueprint_path"}
if graph_type not in ["EventGraph", "FunctionGraph"]:
log.log_error(f"Invalid graph_type: {graph_type}")
return {"success": False, "error": f"Invalid graph_type: {graph_type}"}
log.log_command("get_node_guid", f"Blueprint: {blueprint_path}, Graph: {graph_type}, Node: {node_name or function_id}")
# Call the C++ implementation
gen_bp_utils = unreal.GenBlueprintUtils
node_guid = gen_bp_utils.get_node_guid(blueprint_path, graph_type, node_name, function_id)
if node_guid:
log.log_result("get_node_guid", True, f"Found node GUID: {node_guid}")
return {"success": True, "node_guid": node_guid}
else:
log.log_error(f"Failed to find node: {node_name or 'FunctionEntry'}")
return {"success": False, "error": f"Node not found: {node_name or 'FunctionEntry'}"}
except Exception as e:
log.log_error(f"Error getting node GUID: {str(e)}", include_traceback=True)
return {"success": False, "error": str(e)}
|
import unreal
data = unreal.AutomatedAssetImportData()
data.destination_path = "/project/"
data.filenames = ["C:/project/.fbx"]
factory = unreal.FbxSceneImportFactory()
data.factory = factory
unreal.AssetToolsHelpers.get_asset_tools().import_assets_automated(data)
|
#!-!-!-!-!-!-!-!-!-!-!-!-!-!-#
#####
######## IMPORTS
#####
import unreal
print('starting ai_updates.py')
######## THING
#####
#!-!-!-!-!-!-!-!-!-!-!-!-!-!-#
#####
######## VARIABLES
#####
######## THING
#####
#!-!-!-!-!-!-!-!-!-!-!-!-!-!-#
#####
######## FUNCTIONS
#####
######## THING
#####
# ######## RETRIEVE UPDATE DATA
# #####
# def retrieve_update_data(packname='visai'):
# if packname=="visai":
# dataURL = 'https://ai.vis-foundation.com/project/.html' # Get Data
# elif packname=="visgm":
# dataURL = 'https://gm.vis-foundation.com/project/.html' # Get Data
# else:
# return "pack not recognized. use all lowercase, no spaces"
# # Cleanup
# thePage = str(urllib2.urlopen(dataURL).read()) # Format to a string
# parsedReq = thePage.split("#") # Split String Data
# del parsedReq[0] # Remove Null Data
# del parsedReq[-1] # Remove Null Data
# return parsedReq
# ######## CLEAN UPDATE DATA
# #####
# def clean_update_data(listToClean):
# versionUpdateDataList = []
# for i in listToClean:
# j = i.replace(' ','')
# k = j.replace('\\n','')
# versionUpdateDataList.append(k)
# return versionUpdateDataList
#!-!-!-!-!-!-!-!-!-!-!-!-!-!-#
#####
######## THE APP
#####
######## THING
#####
print('ai_updates.py has been initialized')
|
import unreal
from unreal import (
StaticMesh,
)
from collections import defaultdict
# input
static_mesh: StaticMesh
edge_max: int
face_count_max: int
print_log: bool
# output
result: bool
log_str: str
mesh_path = static_mesh.get_path_name()
def are_normals_aligned(normals, tolerance=1e-4):
"""判断所有法线是否一致(即mesh是否为平面)"""
if not normals:
return False
ref = normals[0]
for n in normals[1:]:
if (ref - n).length() > tolerance:
return False
return True
def is_colinear(p1, p2, p3, tolerance=1e-4):
"""判断三点是否共线"""
v1 = unreal.Vector(p2.x - p1.x, p2.y - p1.y, p2.z - p1.z)
v2 = unreal.Vector(p3.x - p2.x, p3.y - p2.y, p3.z - p2.z)
cross = v1.cross(v2)
colinear = cross.length() < tolerance
# unreal.log_warning(f"共线判断: {p1}, {p2}, {p3} -> {colinear} (cross.length={cross.length()})")
return colinear
def count_polygon_edges_by_merging(vertices, boundary_edges, tolerance=1e-4):
"""
    Count the polygon's edges (merging collinear edges); input is the vertex list
    and the set of boundary edges.
    1. Build an adjacency map of the boundary points
    2. Walk the boundary points in order to form an ordered boundary sequence
    3. Traverse the ordered sequence, merging collinear edges, to count the real edges
"""
edge_map = defaultdict(list)
for a, b in boundary_edges:
edge_map[a].append(b)
edge_map[b].append(a)
    start = next(iter(edge_map))  # pick an arbitrary boundary point as the start
ordered = [start]
prev = None
curr = start
    # walk the boundary points in order to form a closed loop
while True:
next_points = [p for p in edge_map[curr] if p != prev]
if not next_points:
break
next_pt = next_points[0]
ordered.append(next_pt)
prev, curr = curr, next_pt
if curr == start:
break
    # if the first and last points repeat, drop the last one
if ordered[0] == ordered[-1]:
ordered = ordered[:-1]
    # merge collinear edges and count the real edges
count = 0
n = len(ordered)
for i in range(n):
p1 = vertices[ordered[i - 1]]
p2 = vertices[ordered[i]]
p3 = vertices[ordered[(i + 1) % n]]
if not is_colinear(p1, p2, p3, tolerance):
count += 1
# unreal.log_warning(f"合并共线后边数: {count}")
return count
def get_polygon_edge_count(vertices, triangles, tolerance=1e-4):
"""
    Count the polygon edges of a planar mesh, returning 0 when the mesh has holes
"""
edge_count = defaultdict(int)
for i in range(0, len(triangles), 3):
idx0, idx1, idx2 = triangles[i], triangles[i+1], triangles[i+2]
edges = [
tuple(sorted((idx0, idx1))),
tuple(sorted((idx1, idx2))),
tuple(sorted((idx2, idx0))),
]
for edge in edges:
edge_count[edge] += 1
boundary_edges = [edge for edge, count in edge_count.items() if count == 1]
if not boundary_edges:
return 0
    # count the number of boundary rings
edge_map = defaultdict(list)
for a, b in boundary_edges:
edge_map[a].append(b)
edge_map[b].append(a)
visited = set()
rings = 0
for start in edge_map:
if start in visited:
continue
rings += 1
curr = start
prev = None
while True:
visited.add(curr)
next_points = [p for p in edge_map[curr] if p != prev and p not in visited]
if not next_points:
break
next_pt = next_points[0]
prev, curr = curr, next_pt
if curr == start:
break
if rings > 1:
        # more than one ring means the mesh has holes; return 0
return 0
    # exactly one ring: count the edges normally
count = count_polygon_edges_by_merging(vertices, boundary_edges, tolerance)
return count
def check_mesh_planarity(static_mesh: StaticMesh, edge_max: int, face_count_max: int, print_log: bool = True) -> tuple:
section_count = static_mesh.get_num_sections(0)
for section_index in range(section_count):
vertices, triangles, normals, uvs, tangents = unreal.ProceduralMeshLibrary.get_section_from_static_mesh(static_mesh, 0, section_index)
        # Check whether all normals agree (the section is planar)
if are_normals_aligned(normals):
polygon_edge_count = get_polygon_edge_count(vertices, triangles)
            # polygon_edge_count == 0 means the section has holes; skip it
if polygon_edge_count > 2 and polygon_edge_count <= edge_max:
face_count = len(triangles) // 3
if face_count > face_count_max:
log_str = f"""特效的平面Mesh的面数超过最大限制(最大{face_count_max}):
Mesh路径: {mesh_path}
section_index: {section_index}
识别的边数: {polygon_edge_count}
三角面数: {face_count}
"""
if print_log:
unreal.log_warning(log_str)
                    return (False, log_str)
return (True, "")
result, log_str = check_mesh_planarity(static_mesh, edge_max, face_count_max, print_log)
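# Standalone usage sketch (illustrative only; the asset path below is a
# hypothetical placeholder). Note that check_mesh_planarity() also reads the
# module-level mesh_path when it builds its warning message, so it is primarily
# meant to run with the externally bound inputs above.
def _demo_check_mesh_planarity(asset_path="/Game/FX/SM_PlaneCard"):
    test_mesh = unreal.EditorAssetLibrary.load_asset(asset_path)
    if isinstance(test_mesh, unreal.StaticMesh):
        return check_mesh_planarity(test_mesh, edge_max=4, face_count_max=2, print_log=True)
    return (False, f"{asset_path} is not a StaticMesh")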
|
# Copyright (c) <2021> Side Effects Software Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The name of Side Effects Software may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY SIDE EFFECTS SOFTWARE "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL SIDE EFFECTS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unreal
""" Example script for instantiating an asset, cooking it and baking all of
its outputs.
"""
_g_wrapper = None
def get_test_hda_path():
return '/project/.pig_head_subdivider_v01'
def get_test_hda():
return unreal.load_object(None, get_test_hda_path())
def on_post_instantiation(in_wrapper):
print('on_post_instantiation')
# in_wrapper.on_post_instantiation_state_exited_delegate_delegate.remove_callable(on_post_instantiation)
# Set parameter values for the next cook
# in_wrapper.set_bool_parameter_value('add_instances', True)
# in_wrapper.set_int_parameter_value('num_instances', 8)
in_wrapper.set_parameter_tuples({
'add_instances': unreal.HoudiniParameterTuple(bool_values=(True, )),
'num_instances': unreal.HoudiniParameterTuple(int32_values=(8, )),
})
# Print all parameter values
param_tuples = in_wrapper.get_parameter_tuples()
print('parameter tuples: {}'.format(len(param_tuples) if param_tuples else 0))
if param_tuples:
for param_tuple_name, param_tuple in param_tuples.items():
print('parameter tuple name: {}'.format(param_tuple_name))
print('\tbool_values: {}'.format(param_tuple.bool_values))
print('\tfloat_values: {}'.format(param_tuple.float_values))
print('\tint32_values: {}'.format(param_tuple.int32_values))
print('\tstring_values: {}'.format(param_tuple.string_values))
# Force a cook/recook
in_wrapper.recook()
def on_post_bake(in_wrapper, success):
in_wrapper.on_post_bake_delegate.remove_callable(on_post_bake)
print('bake complete ... {}'.format('success' if success else 'failed'))
# Delete the hda after the bake
in_wrapper.delete_instantiated_asset()
global _g_wrapper
_g_wrapper = None
def on_post_process(in_wrapper):
print('on_post_process')
# in_wrapper.on_post_processing_delegate.remove_callable(on_post_process)
# Print out all outputs generated by the HDA
num_outputs = in_wrapper.get_num_outputs()
print('num_outputs: {}'.format(num_outputs))
if num_outputs > 0:
for output_idx in range(num_outputs):
identifiers = in_wrapper.get_output_identifiers_at(output_idx)
print('\toutput index: {}'.format(output_idx))
print('\toutput type: {}'.format(in_wrapper.get_output_type_at(output_idx)))
print('\tnum_output_objects: {}'.format(len(identifiers)))
if identifiers:
for identifier in identifiers:
output_object = in_wrapper.get_output_object_at(output_idx, identifier)
output_component = in_wrapper.get_output_component_at(output_idx, identifier)
is_proxy = in_wrapper.is_output_current_proxy_at(output_idx, identifier)
print('\t\tidentifier: {}'.format(identifier))
print('\t\toutput_object: {}'.format(output_object.get_name() if output_object else 'None'))
print('\t\toutput_component: {}'.format(output_component.get_name() if output_component else 'None'))
print('\t\tis_proxy: {}'.format(is_proxy))
print('')
# bind to the post bake delegate
in_wrapper.on_post_bake_delegate.add_callable(on_post_bake)
# Bake all outputs to actors
print('baking all outputs to actors')
in_wrapper.bake_all_outputs_with_settings(
unreal.HoudiniEngineBakeOption.TO_ACTOR,
replace_previous_bake=False,
remove_temp_outputs_on_success=False)
def run():
# get the API singleton
api = unreal.HoudiniPublicAPIBlueprintLib.get_api()
global _g_wrapper
# instantiate an asset, disabling auto-cook of the asset (so we have to
    # call wrapper.recook() to cook it)
_g_wrapper = api.instantiate_asset(get_test_hda(), unreal.Transform(), enable_auto_cook=False)
# Bind to the on post instantiation delegate (before the first cook)
_g_wrapper.on_post_instantiation_delegate.add_callable(on_post_instantiation)
# Bind to the on post processing delegate (after a cook and after all
# outputs have been generated in Unreal)
_g_wrapper.on_post_processing_delegate.add_callable(on_post_process)
if __name__ == '__main__':
run()
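# Usage note (not part of the original example): run this script from the Unreal
# Editor's Python console (or the "py" console command) with the Houdini Engine
# plugin loaded. The callbacks bound in run() fire asynchronously as the HDA
# instantiates, cooks and bakes, which is why the wrapper is kept alive in the
# module-level _g_wrapper until on_post_bake() clears it.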
|
import unreal
class PamuxAssetUtils:
@staticmethod
    def ensureAssetDirectoryExists(dir: str):
        """Create the content browser directory if it does not already exist."""
        if not unreal.EditorAssetLibrary.does_directory_exist(dir):
            unreal.EditorAssetLibrary.make_directory(dir)
@staticmethod
    def split_asset_path(asset_path: str):
        """Split an asset path into its directory part and its asset name part."""
        last_slash = asset_path.rindex("/")
        return asset_path[0:last_slash], asset_path[last_slash + 1:]
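# Usage sketch (the content paths below are hypothetical, for illustration only):
# PamuxAssetUtils.ensureAssetDirectoryExists("/Game/Generated/Materials")
if __name__ == "__main__":
    folder, asset_name = PamuxAssetUtils.split_asset_path("/Game/Generated/Materials/M_Base")
    unreal.log(f"folder={folder}, asset={asset_name}")  # -> /Game/Generated/Materials, M_Base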
|