diff --git a/molecularnodes/__init__.py b/molecularnodes/__init__.py index a6558be7..80869f6d 100644 --- a/molecularnodes/__init__.py +++ b/molecularnodes/__init__.py @@ -15,6 +15,7 @@ from bpy.app.handlers import frame_change_post, load_post, save_post from . import entities, operators, props, session, ui +from .utils import add_current_module_to_path from .ui import pref from .ui.node_menu import MN_add_node_menu from .ui.panel import MN_PT_Scene, pt_object_context, change_style_node_menu @@ -48,7 +49,7 @@ def register(): except Exception as e: print(e) pass - + add_current_module_to_path() bpy.types.NODE_MT_add.append(MN_add_node_menu) bpy.types.VIEW3D_MT_object_context_menu.prepend(pt_object_context) bpy.types.NODE_MT_context_menu.prepend(change_style_node_menu) diff --git a/molecularnodes/blender/__init__.py b/molecularnodes/blender/__init__.py index 94a1d5c5..a9e8ba98 100644 --- a/molecularnodes/blender/__init__.py +++ b/molecularnodes/blender/__init__.py @@ -1,19 +1 @@ -from pathlib import Path -from typing import Union -import bpy - - -def path_resolve(path: Union[str, Path]) -> Path: - if isinstance(path, str): - return Path(bpy.path.abspath(path)) - elif isinstance(path, Path): - return Path(bpy.path.abspath(str(path))) - else: - raise ValueError(f"Unable to resolve path: {path}") - - -def active_object(context: bpy.types.Context = None) -> bpy.types.Object: - if context is None: - return bpy.context.active_object - - return context.active_object +from .utils import path_resolve diff --git a/molecularnodes/blender/bones.py b/molecularnodes/blender/bones.py deleted file mode 100644 index d52148ea..00000000 --- a/molecularnodes/blender/bones.py +++ /dev/null @@ -1,115 +0,0 @@ -import bpy -import numpy as np -from . 
import mesh, coll - - -def clear_armature(object): - for mod in object.modifiers: - if mod.type == "ARMATURE": - if mod.object: - bpy.data.objects.remove(mod.object) - object.modifiers.remove(mod) - - -def add_bones(object, name="armature"): - # creates bones and assigns correct weights - - clear_armature(object) - - bone_positions, bone_weights, chain_ids = get_bone_positions(object) - - armature = create_bones(bone_positions, chain_ids) - for i in range(bone_weights.shape[1]): - group = object.vertex_groups.new(name=f"mn_armature_{i}") - vertex_indices = np.where(bone_weights[:, i] == 1)[0] - group.add(vertex_indices.tolist(), 1, "ADD") - - object.select_set(True) - armature.select_set(True) - bpy.context.view_layer.objects.active = armature - bpy.ops.object.parent_set(type="ARMATURE") - - bpy.context.view_layer.objects.active = object - bpy.ops.object.modifier_move_to_index("EXEC_DEFAULT", modifier="Armature", index=0) - - return armature - - -def get_bone_positions(object): - positions, atom_name, chain_id, res_id, sec_struct = [ - mesh.named_attribute(object, att) - for att in ["position", "atom_name", "chain_id", "res_id", "sec_struct"] - ] - - is_alpha_carbon = atom_name == 2 - idx = np.where(is_alpha_carbon)[0] - bone_positions = positions[idx, :] - bone_positions = np.vstack((bone_positions, positions[-1])) - group_ids = np.cumsum(is_alpha_carbon) - groups = np.unique(group_ids) - bone_weights = np.zeros((len(group_ids), len(groups))) - - for i, unique_id in enumerate(groups): - bone_weights[:, i] = ((group_ids - 1) == unique_id).astype(int) - - print("get_bone_positions") - return bone_positions, bone_weights, chain_id[idx] - - -def get_bone_weights(object): - print("hello world") - - -def create_bones(positions, chain_ids, name="armature"): - bpy.ops.object.add(type="ARMATURE", enter_editmode=True) - object = bpy.context.active_object - object.name = name - coll.armature().objects.link(object) - armature = object.data - armature.name = f"{name}_frame" - 
arm_name = armature.name - bones = [] - # add bones - for i, position in enumerate(positions): - try: - pos_a = position - pos_b = positions[i + 1, :] - except: - continue - - bone_name = f"mn_armature_{i}" - bone = armature.edit_bones.new(bone_name) - bone.head = pos_a - bone.tail = pos_b - bones.append(bone.name) - - armature = bpy.data.armatures[arm_name] - bones_a = bones.copy() - bones_b = bones.copy() - bones_b.pop(0) - bones = zip(bones_a, bones_b) - - for bone_a, bone_b in bones: - armature.edit_bones.active = armature.edit_bones[bone_a] - for bone in [bone_a, bone_b]: - armature.edit_bones[bone].select = True - bpy.ops.armature.parent_set(type="CONNECTED") - for bone in [bone_a, bone_b]: - armature.edit_bones[bone].select = False - bpy.ops.object.editmode_toggle() - - return object - - -class MN_MT_Add_Armature(bpy.types.Operator): - bl_idname = "mn.add_armature" - bl_label = "Add Armature" - bl_description = ( - "Automatically add armature for each amino acid of the structure " - ) - - def execute(self, context): - object = context.active_object - add_bones(bpy.data.objects[object.name], name=object.name) - - return {"FINISHED"} diff --git a/molecularnodes/blender/coll.py b/molecularnodes/blender/coll.py index f5359bba..0b0f6328 100644 --- a/molecularnodes/blender/coll.py +++ b/molecularnodes/blender/coll.py @@ -1,70 +1,34 @@ import bpy +from bpy.types import Collection +from ..bpyd.collection import create_collection -def mn(): - """Return the MolecularNodes Collection +def mn() -> Collection: + "Return the 'MolecularNodes' collection, creating it first if required" + return create_collection("MolecularNodes") - The collection called 'MolecularNodes' inside the Blender scene is returned. If the - collection does not exist first, it is created. 
- """ - coll = bpy.data.collections.get('MolecularNodes') - if not coll: - coll = bpy.data.collections.new('MolecularNodes') - bpy.context.scene.collection.children.link(coll) - return coll +def data() -> Collection: + "Return the MolecularNodes/data collection and disable it" + name = ".MN_data" -def armature(name='MN_armature'): - coll = bpy.data.collections.get(name) - if not coll: - coll = bpy.data.collections.new(name) - mn().children.link(coll) - return coll + try: + return bpy.data.collections[name] + except KeyError: + collection = create_collection(name=name, parent=mn()) + bpy.context.view_layer.layer_collection.children["MolecularNodes"].children[ + collection.name + ].exclude = True + return collection -def data(suffix=""): - """A collection for storing MN related data objects. - """ - name = f"MN_data{suffix}" - collection = bpy.data.collections.get(name) - if not collection: - collection = bpy.data.collections.new(name) - mn().children.link(collection) +def frames(name: str = "") -> Collection: + "Return a collection for storing the objects that are the frames of a trajectory" + return create_collection(f".data_{name}_frames", parent=data()) - # disable the view of the data collection - bpy.context.view_layer.layer_collection.children['MolecularNodes'].children[name].exclude = True - return collection - -def frames(name="", parent=None, suffix="_frames"): - """Create a Collection for Frames of a Trajectory - - Args: - name (str, optional): Name of the collection for the frames. Defaults to "". - parent (_type_, optional): A blender collection which will become the parent - collection. Defaults to the MolecularNodes collection if None. 
- """ - coll_frames = bpy.data.collections.new(name + suffix) - if not parent: - mn().children.link(coll_frames) - else: - parent.children.link(coll_frames) - - return coll_frames - - -def cellpack(name="", parent=None, fallback=False): +def cellpack(name: str = "") -> Collection: + "Return a collection for storing the instances for a CellPack Ensemble" full_name = f"cellpack_{name}" - coll = bpy.data.collections.get(full_name) - if coll and fallback: - return coll - - coll = bpy.data.collections.new(full_name) - - if parent: - parent.children.link(coll) - else: - data().children.link(coll) - - return coll + return create_collection(full_name, parent=data()) diff --git a/molecularnodes/blender/material.py b/molecularnodes/blender/material.py index d34d0e4a..d2af7ee7 100644 --- a/molecularnodes/blender/material.py +++ b/molecularnodes/blender/material.py @@ -1,8 +1,8 @@ -import bpy -import os +from bpy.types import Material +from ..bpyd.material import append_from_blend from ..utils import MN_DATA_FILE -materials = [ +MATERIAL_NAMES = [ "MN Default", "MN Flat Outline", "MN Squishy", @@ -11,23 +11,17 @@ ] -def append_material(name: str) -> bpy.types.Material: - mat = bpy.data.materials.get(name) - - if not mat: - bpy.ops.wm.append( - directory=os.path.join(MN_DATA_FILE, "Material"), - filename=name, - link=False, - ) - - return bpy.data.materials[name] +def append(name: str) -> Material: + "Append a material from the MN_DATA_FILE." + return append_from_blend(name, MN_DATA_FILE) def add_all_materials() -> None: - for mat in materials: - append_material(mat) + "Append all pre-defined materials from the MN_DATA_FILE." + for name in MATERIAL_NAMES: + append(name) -def default() -> bpy.types.Material: - return append_material("MN Default") +def default() -> Material: + "Return the default material." 
+ return append("MN Default") diff --git a/molecularnodes/blender/mesh.py b/molecularnodes/blender/mesh.py index f241ef7b..400f0c70 100644 --- a/molecularnodes/blender/mesh.py +++ b/molecularnodes/blender/mesh.py @@ -1,350 +1,12 @@ import bpy import numpy as np -from typing import Optional -from enum import Enum +from . import coll, nodes +from ..bpyd.attribute import AttributeTypes +from ..bpyd.object import create_bob -from . import coll -from . import nodes -from dataclasses import dataclass - -@dataclass -class AttributeTypeInfo: - dname: str - dtype: type - width: int - - -TYPES = { - key: AttributeTypeInfo(*values) - for key, values in { - "FLOAT_VECTOR": ("vector", float, [3]), - "FLOAT_COLOR": ("color", float, [4]), - "QUATERNION": ("value", float, [4]), - "INT": ("value", int, [1]), - "FLOAT": ("value", float, [1]), - "INT32_2D": ("value", int, [2]), - "FLOAT4X4": ("value", float, [4, 4]), - "BOOLEAN": ("value", bool, [1]), - }.items() -} - - -class AttributeMismatchError(Exception): - def __init__(self, message): - self.message = message - super().__init__(self.message) - - -def centre(array: np.array): - return np.mean(array, axis=0) - - -def centre_weighted(array: np.ndarray, weight: np.ndarray): - return np.sum(array * weight.reshape((len(array), 1)), axis=0) / np.sum(weight) - - -class ObjectTracker: - """ - A context manager for tracking new objects in Blender. - - This class provides a way to track new objects that are added to Blender's bpy.data.objects collection. - It stores the current objects when entering the context and provides a method to find new objects that were added when exiting the context. - - Methods - ------- - new_objects(): - Returns a list of new objects that were added to bpy.data.objects while in the context. - """ - - def __enter__(self): - """ - Store the current objects and their names when entering the context. - - Returns - ------- - self - The instance of the class. 
- """ - self.objects = list(bpy.context.scene.objects) - return self - - def __exit__(self, type, value, traceback): - pass - - def new_objects(self): - """ - Find new objects that were added to bpy.data.objects while in the context. - - Use new_objects()[-1] to get the most recently added object. - - Returns - ------- - list - A list of new objects. - """ - obj_names = list([o.name for o in self.objects]) - current_objects = bpy.context.scene.objects - new_objects = [] - for obj in current_objects: - if obj.name not in obj_names: - new_objects.append(obj) - return new_objects - - def latest(self): - """ - Get the most recently added object. - - This method returns the most recently added object to bpy.data.objects while in the context. - - Returns - ------- - bpy.types.Object - The most recently added object. - """ - return self.new_objects()[-1] - - -def create_object( - vertices: np.ndarray = [], - edges: np.ndarray = [], - faces: np.ndarray = [], - name: str = "NewObject", - collection: bpy.types.Collection = None, -) -> bpy.types.Object: - """ - Create a new Blender object, initialised with locations for each vertex. - - If edges and faces are supplied then these are also created on the mesh. - - Parameters - ---------- - vertices : np.ndarray, optional - The vertices of the vertices as a numpy array. Defaults to None. - edges : np.ndarray, optional - The edges of the object as a numpy array. Defaults to None. - faces : np.ndarray, optional - The faces of the object as a numpy array. Defaults to None. - name : str, optional - The name of the object. Defaults to 'NewObject'. - collection : bpy.types.Collection, optional - The collection to link the object to. Defaults to None. - - Returns - ------- - bpy.types.Object - The created object. 
- """ - mesh = bpy.data.meshes.new(name) - - mesh.from_pydata(vertices=vertices, edges=edges, faces=faces) - - object = bpy.data.objects.new(name, mesh) - - if not collection: - # Add the object to the scene if no collection is specified - collection = bpy.data.collections["Collection"] - - collection.objects.link(object) - - object["type"] = "molecule" - - return object - - -class AttributeDataType(Enum): - FLOAT_VECTOR = "FLOAT_VECTOR" - FLOAT_COLOR = "FLOAT_COLOR" - QUATERNION = "QUATERNION" - FLOAT = "FLOAT" - INT = "INT" - BOOLEAN = "BOOLEAN" - FLOAT4X4 = "FLOAT4X4" - - -def store_named_attribute( - obj: bpy.types.Object, - name: str, - data: np.ndarray, - data_type: Optional[str] = None, - domain: str = "POINT", - overwrite: bool = True, -) -> bpy.types.Attribute: - """ - Adds and sets the values of an attribute on the object. - - Parameters - ---------- - obj : bpy.types.Object - The Blender object. - name : str - The name of the attribute. - data : np.ndarray - The attribute data as a numpy array. - type : str, optional - The data type of the attribute. Defaults to None. Possible values are: - 'FLOAT_VECTOR', 'FLOAT_COLOR', 'FLOAT4X4', 'QUATERNION', 'FLOAT', 'INT', 'BOOLEAN' - domain : str, optional - The domain of the attribute. Defaults to 'POINT'. Currently, only 'POINT', 'EDGE', - and 'FACE' have been tested. - overwrite : bool, optional - Whether to overwrite an existing attribute with the same name. Defaults to True. - - Returns - ------- - bpy.types.Attribute - The added attribute. - """ - - # if the datatype isn't specified, try to guess the datatype based on the - # datatype of the ndarray. 
This should work but ultimately won't guess between - # the quaternion and color datatype, so will just default to color - if data_type is None: - dtype = data.dtype - shape = data.shape - - if len(shape) == 1: - if np.issubdtype(dtype, np.int_): - data_type = "INT" - elif np.issubdtype(dtype, np.float_): - data_type = "FLOAT" - elif np.issubdtype(dtype, np.bool_): - data_type = "BOOLEAN" - elif len(shape) == 3 and shape[1:] == (4, 4): - data_type = "FLOAT4X4" - else: - if shape[1] == 3: - data_type = "FLOAT_VECTOR" - elif shape[1] == 4: - data_type = "FLOAT_COLOR" - else: - data_type = "FLOAT" - # catch if the data_type still wasn't determined and report info about the data - if data_type is None: - data_type = "FLOAT" - # raise ValueError( - # f"Unable to determine data type for {data}, {shape=}, {dtype=}" - # ) - - attribute = obj.data.attributes.get(name) # type: ignore - if not attribute or not overwrite: - attribute = obj.data.attributes.new(name, data_type, domain) # type: ignore - - if len(data) != len(attribute.data): - raise AttributeMismatchError( - f"Data length {len(data)}, dimensions {data.shape} does not equal the size of the target domain {domain}, len={len(attribute.data)=}" - ) - - # the 'foreach_set' requires a 1D array, regardless of the shape of the attribute - # it also requires the order to be 'c' or blender might crash!! - attribute.data.foreach_set(TYPES[data_type].dname, data.reshape(-1)) - - # The updating of data doesn't work 100% of the time (see: - # https://projects.blender.org/blender/blender/issues/118507) so this resetting of a - # single vertex is the current fix. Not great as I can see it breaking when we are - # missing a vertex - but for now we shouldn't be dealing with any situations where this - # is the case For now we will set a single vert to it's own position, which triggers a - # proper refresh of the object data. 
- try: - obj.data.vertices[0].co = obj.data.vertices[0].co # type: ignore - except AttributeError: - obj.data.update() # type: ignore - - return attribute - - -def named_attribute( - object: bpy.types.Object, name="position", evaluate=False -) -> np.ndarray: - """ - Get the attribute data from the object. - - Parameters: - object (bpy.types.Object): The Blender object. - name (str, optional): The name of the attribute. Defaults to 'position'. - - Returns: - np.ndarray: The attribute data as a numpy array. - """ - if evaluate: - object = evaluated(object) - attribute_names = object.data.attributes.keys() - verbose = False - if name not in attribute_names: - if verbose: - raise AttributeError( - f"The selected attribute '{name}' does not exist on the mesh. \ - Possible attributes are: {attribute_names=}" - ) - else: - raise AttributeError( - f"The selected attribute '{name}' does not exist on the mesh." - ) - - # Get the attribute and some metadata about it from the object - att = object.data.attributes[name] - n_att = len(att.data) - data_type = TYPES[att.data_type] - dim = data_type.width - n_values = n_att - for dimension in dim: - n_values *= dimension - - # data to and from attributes has to be given and taken as a 1D array - # we have the initialise the array first with the appropriate length, then we can - # fill it with the given data using the 'foreach_get' method which is super fast C++ - # internal method - array = np.zeros(n_values, dtype=data_type.dtype) - # it is currently not really consistent, but to get the values you need to use one of - # the 'value', 'vector', 'color' etc from the types dict. This I could only figure - # out through trial and error. I assume this might be changed / improved in the future - att.data.foreach_get(data_type.dname, array) - - if dim == [1]: - return array - else: - # return an array with one row per item, even if a 1D attribute. Does this make sense? 
- return array.reshape((n_att, *dim)) - - -def import_vdb(file: str, collection: bpy.types.Collection = None) -> bpy.types.Object: - """ - Imports a VDB file as a Blender volume object, in the MolecularNodes collection. - - Parameters - ---------- - file : str - Path to the VDB file. - - Returns - ------- - bpy.types.Object - A Blender object containing the imported volume data. - """ - - # import the volume object - with ObjectTracker() as o: - bpy.ops.object.volume_import(filepath=file, files=[]) - object = o.latest() - - if collection: - # Move the object to the MolecularNodes collection - initial_collection = object.users_collection[0] - initial_collection.objects.unlink(object) - collection = coll.mn() - collection.objects.link(object) - - return object - - -def evaluated(object): - "Return an object which has the modifiers evaluated." - object.update_tag() - return object.evaluated_get(bpy.context.evaluated_depsgraph_get()) - - -def evaluate_using_mesh(object): +def evaluate_using_mesh(obj: bpy.types.Object) -> bpy.types.Object: """ Evaluate the object using a debug object. Some objects can't currently have their Geometry Node trees evaluated (such as volumes), so we source the geometry they create @@ -365,18 +27,21 @@ def evaluate_using_mesh(object): """ # create an empty mesh object. 
It's modifiers can be evaluated but some other # object types can't be currently through the API - debug = create_object() - mod = nodes.get_mod(debug) + bob = create_bob() + mod = nodes.get_mod(bob.object) mod.node_group = nodes.create_debug_group() - mod.node_group.nodes["Object Info"].inputs["Object"].default_value = object + mod.node_group.nodes["Object Info"].inputs["Object"].default_value = obj # need to use 'evaluate' otherwise the modifiers won't be taken into account - return evaluated(debug) + return bob.evaluate().object def create_data_object( - array, collection=None, name="DataObject", world_scale=0.01, fallback=False -): + array: np.ndarray, + name: str = "DataObject", + collection: str | bpy.types.Collection | None = None, + world_scale: float = 0.01, +) -> bpy.types.Object: # still requires a unique call TODO: figure out why # I think this has to do with the bcif instancing extraction array = np.unique(array) @@ -385,13 +50,13 @@ def create_data_object( if not collection: collection = coll.data() - object = create_object(locations, collection=collection, name=name) + bob = create_bob(locations, collection=collection, name=name) attributes = [ - ("rotation", "QUATERNION"), - ("assembly_id", "INT"), - ("chain_id", "INT"), - ("transform_id", "INT"), + ("rotation", AttributeTypes.QUATERNION), + ("assembly_id", AttributeTypes.INT), + ("chain_id", AttributeTypes.INT), + ("transform_id", AttributeTypes.INT), ] for column, type in attributes: @@ -404,8 +69,6 @@ def create_data_object( if np.issubdtype(data.dtype, str): data = np.unique(data, return_inverse=True)[1] - store_named_attribute( - object, name=column, data=data, data_type=type, domain="POINT" - ) + bob.store_named_attribute(data=data, name=column, atype=type) - return object + return bob.object diff --git a/molecularnodes/blender/node.py b/molecularnodes/blender/node.py deleted file mode 100644 index eaf0ed07..00000000 --- a/molecularnodes/blender/node.py +++ /dev/null @@ -1,37 +0,0 @@ -from abc 
import ABCMeta -from typing import Optional, Any -import warnings -import time -import numpy as np -import bpy - - -class Node(metaclass=ABCMeta): - def __init__(self, node: bpy.types.Node, chain=[]): - - self.node = node - self.group = node.id_data - self.chain = chain - - @property - def location(self): - return np.array(self.node.location) - - def new(self, name): - "Add a new node to the node group." - try: - return self.group.nodes.new(f'GeometryNode{name}') - except RuntimeError: - return self.group.nodes.new(f'ShaderNode{name}') - - def link(self, name, linkto=0, linkfrom=0): - "Create a new node along in the chain and create a link to it. Return the new node." - new_node = self.new(name) - new_node.location = self.location + np.array((200, 0)) - - self.group.links.new( - self.node.outputs[linkfrom], - new_node.inputs[linkto] - ) - - return Node(new_node, chain=self.chain + [self]) diff --git a/molecularnodes/blender/nodes.py b/molecularnodes/blender/nodes.py index 60ad89dd..7af763a7 100644 --- a/molecularnodes/blender/nodes.py +++ b/molecularnodes/blender/nodes.py @@ -1,89 +1,24 @@ import itertools import math import os -import warnings from ..utils import MN_DATA_FILE from . import material from typing import List, Optional -import time import bpy import numpy as np from .. import color, utils from . import mesh -import re +from .. 
import bpyd +from ..bpyd.nodes import ( + NodeGroupCreationError, + append_from_blend, + swap_tree, +) NODE_WIDTH = 180 -node_duplicate_pattern = r"\.\d{3}$" - - -def deduplicate_node_trees(node_trees: List[str]): - # Compile the regex pattern for matching a suffix of a dot followed by 3 numbers - node_duplicate_pattern = re.compile(r"\.\d{3}$") - to_remove: List[bpy.types.GeometryNodeTree] = [] - - for node_tree in node_trees: - # Check if the node tree's name matches the duplicate pattern and is not a "NodeGroup" - for node in node_tree.nodes: - if not ( - hasattr(node, "node_tree") - and node_duplicate_pattern.search(node.node_tree.name) - and "NodeGroup" not in node.node_tree.name - ): - continue - - old_name = node.node_tree.name - # Remove the numeric suffix to get the original name - name_sans = old_name.rsplit(".", 1)[0] - replacement = bpy.data.node_groups.get(name_sans) - if not replacement: - continue - - # print(f"matched {old_name} with {name_sans}") - node.node_tree = replacement - to_remove.append(bpy.data.node_groups[old_name]) - - for tree in to_remove: - try: - # remove the data from the blend file - bpy.data.node_groups.remove(tree) - except ReferenceError: - pass - - -def cleanup_duplicates(purge: bool = False): - # Collect all node trees from node groups, excluding "NodeGroup" named ones - node_trees = [tree for tree in bpy.data.node_groups if "NodeGroup" not in tree.name] - - # Call the deduplication function with the collected node trees - deduplicate_node_trees(node_trees) - - if purge: - # Purge orphan data blocks from the file - bpy.ops.outliner.orphans_purge() - - -class DuplicatePrevention: - def __init__(self, timing=False): - self.current_names: List[str] = [] - self.start_time = None - self.timing = timing - - def __enter__(self): - self.current_names = [tree.name for tree in bpy.data.node_groups] - if self.timing: - self.start_time = time.time() - - def __exit__(self, type, value, traceback): - new_trees = [ - tree for tree in 
bpy.data.node_groups if tree.name not in self.current_names - ] - deduplicate_node_trees(new_trees) - if self.timing: - end_time = time.time() - print(f"De-duplication time: {end_time - self.start_time:.2f} seconds") socket_types = { @@ -123,12 +58,6 @@ def __exit__(self, type, value, traceback): } -class NodeGroupCreationError(Exception): - def __init__(self, message): - self.message = message - super().__init__(self.message) - - def inputs(node): items = {} for item in node.interface.items_tree: @@ -206,18 +135,6 @@ def get_mod(object, name="MolecularNodes"): return node_mod -def format_node_name(name): - "Formats a node's name for nicer printing." - return ( - name.strip("MN_") - .replace("_", " ") - .title() - .replace("Dna", "DNA") - .replace("Topo ", "Topology ") - .replace("Plddt", "pLDDT") - ) - - def get_nodes_last_output(group): output = get_output(group) last = output.inputs[0].links[0].from_node @@ -285,36 +202,22 @@ def realize_instances(obj): insert_last_node(group, realize) -def swap(node: bpy.types.GeometryNode, new: str) -> None: +def swap(node: bpy.types.GeometryNode, tree: str | bpy.types.GeometryNodeTree) -> None: "Swap out the node's node_tree, while maintaining the possible old connections" - if isinstance(new, str): - tree = bpy.data.node_groups.get(new) - if not tree: - tree = append(new) - else: - tree = new - - with MaintainConnections(node): - node.node_tree = tree - node.name = node.label = tree.name + if isinstance(tree, str): + try: + tree = bpy.data.node_groups[tree] + except KeyError: + tree = append(tree) + swap_tree(node=node, tree=tree) -def append(node_name, link=False): - node = bpy.data.node_groups.get(node_name) - if not node or link: - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - with DuplicatePrevention(): - bpy.ops.wm.append( - "EXEC_DEFAULT", - directory=os.path.join(MN_DATA_FILE, "NodeTree"), - filename=node_name, - link=link, - use_recursive=True, - ) - return bpy.data.node_groups[node_name] +def 
append(name: str, link: bool = False) -> bpy.types.GeometryNodeTree: + "Append a GN node from the MN data file" + GN_TREES_PATH = os.path.join(MN_DATA_FILE, "NodeTree") + return append_from_blend(name, filepath=GN_TREES_PATH, link=link) def MN_micrograph_material(): @@ -363,7 +266,7 @@ def assign_material(node, new_material="default") -> None: material_socket.default_value = material.default() else: try: - material_socket.default_value = material.append_material(new_material) + material_socket.default_value = material.append(new_material) except Exception as e: print(f"Unable to use material {new_material}, error: {e}") @@ -389,86 +292,12 @@ def add_custom( node.width = width node.show_options = show_options node.name = name - # node.label = format_node_name(name) return node -class MaintainConnections: - # capture input and output links, so we can rebuild the links based on name - # and the sockets they were connected to - # as we collect them, remove the links so they aren't automatically connected - # when we change the node_tree for the group - - def __init__(self, node: bpy.types.GeometryNode) -> None: - self.node = node - self.input_links = [] - self.output_links = [] - - def __enter__(self): - "Store all the connections in and out of this node for rebuilding on exit." - self.node_tree = self.node.id_data - - for input in self.node.inputs: - for input_link in input.links: - self.input_links.append((input_link.from_socket, input.name)) - self.node_tree.links.remove(input_link) - - for output in self.node.outputs: - for output_link in output.links: - self.output_links.append((output.name, output_link.to_socket)) - self.node_tree.links.remove(output_link) - - try: - self.material = self.node.inputs["Material"].default_value - except KeyError: - self.material = None - - def __exit__(self, type, value, traceback): - "Rebuild the connections in and out of this node that were stored on entry." 
- # rebuild the links based on names of the sockets, not their identifiers - link = self.node_tree.links.new - for input_link in self.input_links: - try: - link(input_link[0], self.node.inputs[input_link[1]]) - except KeyError: - pass - for output_link in self.output_links: - try: - link(self.node.outputs[output_link[0]], output_link[1]) - except KeyError: - pass - - # reset all values to tree defaults - tree = self.node.node_tree - for item in tree.interface.items_tree: - if item.item_type == "PANEL": - continue - if item.in_out == "INPUT": - if hasattr(item, "default_value"): - self.node.inputs[item.identifier].default_value = item.default_value - - if self.material: - try: - self.node.inputs["Material"].default_value = self.material - except KeyError: - # the new node doesn't contain a material slot - pass - - -def swap_style_node(tree, node_style, style): - with MaintainConnections(node_style): - new_tree = append(styles_mapping[style]) - node_style.node_tree = new_tree - node_style.name = new_tree.name - # node_style.label = format_node_name(node_style.name) - - def change_style_node(obj: bpy.types.Object, style: str): - # get the node group that we are working on, to change the specific style node - tree = get_mod(obj).node_group - node_style = get_style_node(obj) - swap_style_node(tree=tree, node_style=node_style, style=style) + swap(get_style_node(obj), append(styles_mapping[style])) def create_starting_nodes_starfile(object, n_images=1): @@ -725,7 +554,7 @@ def create_assembly_node_tree( "name": "assembly_id", "type": "NodeSocketInt", "min": 1, - "max": max(mesh.named_attribute(data_object, "assembly_id")), + "max": max(bpyd.named_attribute(data_object, "assembly_id")), "default": 1, }, ) diff --git a/molecularnodes/blender/utils.py b/molecularnodes/blender/utils.py new file mode 100644 index 00000000..99dd22c4 --- /dev/null +++ b/molecularnodes/blender/utils.py @@ -0,0 +1,11 @@ +import bpy +from pathlib import Path + + +def path_resolve(path: str | Path) 
-> Path: + if isinstance(path, str): + return Path(bpy.path.abspath(path)) + elif isinstance(path, Path): + return Path(bpy.path.abspath(str(path))) + else: + raise ValueError(f"Unable to resolve path: {path}") diff --git a/molecularnodes/blender_manifest.toml b/molecularnodes/blender_manifest.toml index 61ae37dc..7d6751bf 100644 --- a/molecularnodes/blender_manifest.toml +++ b/molecularnodes/blender_manifest.toml @@ -35,17 +35,16 @@ wheels = [ "./wheels/biotite-0.41.2-cp311-cp311-macosx_11_0_arm64.whl", "./wheels/biotite-0.41.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", "./wheels/biotite-0.41.2-cp311-cp311-win_amd64.whl", - "./wheels/colorama-0.4.6-py2.py3-none-any.whl", "./wheels/contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", "./wheels/contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", "./wheels/contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", "./wheels/contourpy-1.3.0-cp311-cp311-win_amd64.whl", "./wheels/cycler-0.12.1-py3-none-any.whl", "./wheels/fasteners-0.19-py3-none-any.whl", - "./wheels/fonttools-4.54.0-cp311-cp311-macosx_10_9_universal2.whl", - "./wheels/fonttools-4.54.0-cp311-cp311-macosx_11_0_arm64.whl", - "./wheels/fonttools-4.54.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", - "./wheels/fonttools-4.54.0-cp311-cp311-win_amd64.whl", + "./wheels/fonttools-4.54.1-cp311-cp311-macosx_10_9_universal2.whl", + "./wheels/fonttools-4.54.1-cp311-cp311-macosx_11_0_arm64.whl", + "./wheels/fonttools-4.54.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", + "./wheels/fonttools-4.54.1-cp311-cp311-win_amd64.whl", "./wheels/joblib-1.4.2-py3-none-any.whl", "./wheels/kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", "./wheels/kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", @@ -62,17 +61,17 @@ wheels = [ "./wheels/msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", "./wheels/msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
"./wheels/msgpack-1.1.0-cp311-cp311-win_amd64.whl", - "./wheels/networkx-3.3-py3-none-any.whl", + "./wheels/networkx-3.4.2-py3-none-any.whl", "./wheels/packaging-24.1-py3-none-any.whl", "./wheels/pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", "./wheels/pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", "./wheels/pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", "./wheels/pandas-2.2.3-cp311-cp311-win_amd64.whl", - "./wheels/pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", - "./wheels/pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", - "./wheels/pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", - "./wheels/pillow-10.4.0-cp311-cp311-win_amd64.whl", - "./wheels/pyparsing-3.1.4-py3-none-any.whl", + "./wheels/pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", + "./wheels/pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", + "./wheels/pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", + "./wheels/pillow-11.0.0-cp311-cp311-win_amd64.whl", + "./wheels/pyparsing-3.2.0-py3-none-any.whl", "./wheels/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", "./wheels/pytz-2024.2-py2.py3-none-any.whl", "./wheels/scipy-1.14.1-cp311-cp311-macosx_10_13_x86_64.whl", @@ -82,7 +81,7 @@ wheels = [ "./wheels/six-1.16.0-py2.py3-none-any.whl", "./wheels/starfile-0.5.6-py3-none-any.whl", "./wheels/threadpoolctl-3.5.0-py3-none-any.whl", - "./wheels/tqdm-4.66.5-py3-none-any.whl", + "./wheels/tqdm-4.66.6-py3-none-any.whl", "./wheels/typing_extensions-4.12.2-py3-none-any.whl", "./wheels/tzdata-2024.2-py2.py3-none-any.whl", ] diff --git a/molecularnodes/bpyd/__init__.py b/molecularnodes/bpyd/__init__.py new file mode 100644 index 00000000..93e839e6 --- /dev/null +++ b/molecularnodes/bpyd/__init__.py @@ -0,0 +1,13 @@ +from .object import ObjectTracker, BlenderObject, create_object, create_bob +from .vdb import import_vdb +from .utils import centre, lerp +from .attribute import ( + named_attribute, + store_named_attribute, + 
+    Attribute,
+    AttributeType,
+    AttributeTypeInfo,
+    AttributeTypes,
+    Domains,
+    DomainType,
+)
diff --git a/molecularnodes/bpyd/attribute.py b/molecularnodes/bpyd/attribute.py
new file mode 100644
index 00000000..46135872
--- /dev/null
+++ b/molecularnodes/bpyd/attribute.py
@@ -0,0 +1,337 @@
+from dataclasses import dataclass
+from enum import Enum
+from typing import Type
+import bpy
+import numpy as np
+
+from pathlib import Path
+
+
+def path_resolve(path: str | Path) -> Path:
+    if isinstance(path, str):
+        return Path(bpy.path.abspath(path))
+    elif isinstance(path, Path):
+        return Path(bpy.path.abspath(str(path)))
+    else:
+        raise ValueError(f"Unable to resolve path: {path}")
+
+
+@dataclass
+class AttributeTypeInfo:
+    dname: str
+    dtype: type
+    width: int
+
+
+@dataclass
+class DomainType:
+    name: str
+
+    def __str__(self):
+        return self.name
+
+
+class AttributeMismatchError(Exception):
+    def __init__(self, message):
+        self.message = message
+        super().__init__(self.message)
+
+
+# https://docs.blender.org/api/current/bpy_types_enum_items/attribute_domain_items.html#rna-enum-attribute-domain-items
+class Domains:
+    POINT = DomainType(name="POINT")
+    EDGE = DomainType(name="EDGE")
+    FACE = DomainType(name="FACE")
+    CORNER = DomainType(name="CORNER")
+    CURVE = DomainType(name="CURVE")
+    INSTANCE = DomainType(name="INSTANCE")
+    LAYER = DomainType(name="LAYER")
+
+
+@dataclass
+class AttributeType:
+    type_name: str
+    value_name: str
+    dtype: Type
+    dimensions: tuple
+
+    def __str__(self) -> str:
+        return self.type_name
+
+
+# https://docs.blender.org/api/current/bpy_types_enum_items/attribute_type_items.html#rna-enum-attribute-type-items
+class AttributeTypes(Enum):
+    # https://docs.blender.org/api/current/bpy.types.FloatAttribute.html#bpy.types.FloatAttribute
+    FLOAT = AttributeType(
+        type_name="FLOAT", value_name="value", dtype=float, dimensions=(1,)
+    )
+    # https://docs.blender.org/api/current/bpy.types.FloatVectorAttribute.html#bpy.types.FloatVectorAttribute
FLOAT_VECTOR = AttributeType( + type_name="FLOAT_VECTOR", value_name="vector", dtype=float, dimensions=(3,) + ) + # https://docs.blender.org/api/current/bpy.types.Float2Attribute.html#bpy.types.Float2Attribute + FLOAT2 = AttributeType( + type_name="FLOAT2", value_name="vector", dtype=float, dimensions=(2,) + ) + # alternatively use color_srgb to get the color info in sRGB color space, otherwise linear color space + # https://docs.blender.org/api/current/bpy.types.FloatColorAttributeValue.html#bpy.types.FloatColorAttributeValue + FLOAT_COLOR = AttributeType( + type_name="FLOAT_COLOR", value_name="color", dtype=float, dimensions=(4,) + ) + # https://docs.blender.org/api/current/bpy.types.ByteColorAttribute.html#bpy.types.ByteColorAttribute + # TODO unsure about this, int values are stored but float values are returned + BYTE_COLOR = AttributeType( + type_name="BYTE_COLOR", value_name="color", dtype=int, dimensions=(4,) + ) + # https://docs.blender.org/api/current/bpy.types.QuaternionAttribute.html#bpy.types.QuaternionAttribute + QUATERNION = AttributeType( + type_name="QUATERNION", value_name="value", dtype=float, dimensions=(4,) + ) + # https://docs.blender.org/api/current/bpy.types.IntAttribute.html#bpy.types.IntAttribute + INT = AttributeType(type_name="INT", value_name="value", dtype=int, dimensions=(1,)) + # https://docs.blender.org/api/current/bpy.types.ByteIntAttributeValue.html#bpy.types.ByteIntAttributeValue + INT8 = AttributeType( + type_name="INT8", value_name="value", dtype=int, dimensions=(1,) + ) + # https://docs.blender.org/api/current/bpy.types.Int2Attribute.html#bpy.types.Int2Attribute + INT32_2D = AttributeType( + type_name="INT32_2D", value_name="value", dtype=int, dimensions=(2,) + ) + # https://docs.blender.org/api/current/bpy.types.Float4x4Attribute.html#bpy.types.Float4x4Attribute + FLOAT4X4 = AttributeType( + type_name="FLOAT4X4", value_name="value", dtype=float, dimensions=(4, 4) + ) + # 
https://docs.blender.org/api/current/bpy.types.BoolAttribute.html#bpy.types.BoolAttribute
+    BOOLEAN = AttributeType(
+        type_name="BOOLEAN", value_name="value", dtype=bool, dimensions=(1,)
+    )
+
+
+def guess_atype_from_array(array: np.ndarray) -> AttributeType:
+    if not isinstance(array, np.ndarray):
+        raise ValueError(f"`array` must be a numpy array, not {type(array)=}")
+
+    dtype = array.dtype
+    shape = array.shape
+    n_row = shape[0]
+
+    # for 1D arrays we use the float, int or boolean attribute types
+    if shape == (n_row, 1) or shape == (n_row,):
+        if np.issubdtype(dtype, np.integer):
+            return AttributeTypes.INT
+        elif np.issubdtype(dtype, np.floating):
+            return AttributeTypes.FLOAT
+        elif np.issubdtype(dtype, np.bool_):
+            return AttributeTypes.BOOLEAN
+
+    # for 2D arrays we use the float_vector, float_color, float4x4 attribute types
+    elif shape == (n_row, 4, 4):
+        return AttributeTypes.FLOAT4X4
+    elif shape == (n_row, 3):
+        return AttributeTypes.FLOAT_VECTOR
+    elif shape == (n_row, 4):
+        return AttributeTypes.FLOAT_COLOR
+
+    # if we didn't match against anything return float
+    return AttributeTypes.FLOAT
+
+
+class Attribute:
+    """
+    Wrapper around a Blender attribute to provide a more convenient interface with numpy arrays
+    """
+
+    def __init__(self, attribute: bpy.types.Attribute):
+        self.attribute = attribute
+        self.n_attr = len(attribute.data)
+        self.atype = AttributeTypes[self.attribute.data_type].value
+
+    @property
+    def value_name(self):
+        return self.atype.value_name
+
+    @property
+    def is_1d(self):
+        return self.atype.dimensions == (1,)
+
+    @property
+    def type_name(self):
+        return self.atype.type_name
+
+    @property
+    def shape(self):
+        return (self.n_attr, *self.atype.dimensions)
+
+    @property
+    def dtype(self) -> Type:
+        return self.atype.dtype
+
+    @property
+    def n_values(self) -> int:
+        return np.prod(self.shape, dtype=int)
+
+    @classmethod
+    def from_object(
+        cls,
+        obj: bpy.types.Object,
+        name: str,
+        atype: AttributeType,
+        domain: DomainType,
+    ):
+        att = obj.data.attributes.get(name)
+        if att is None:
+            att = obj.data.attributes.new(
+                name=name, type=atype.value.type_name, domain=domain.value.name
+            )
+        return Attribute(att)
+
+    def from_array(self, array: np.ndarray) -> None:
+        """
+        Set the attribute data from a numpy array
+        """
+        if array.shape != self.shape:
+            raise ValueError(
+                f"Array shape {array.shape} does not match attribute shape {self.shape}"
+            )
+
+        self.attribute.data.foreach_set(self.value_name, array.reshape(-1))
+
+    def as_array(self) -> np.ndarray:
+        """
+        Returns the attribute data as a numpy array
+        """
+        # initialize empty 1D array that is needed to then be filled with values
+        # from the Blender attribute
+        array = np.zeros(self.n_values, dtype=self.dtype)
+        self.attribute.data.foreach_get(self.value_name, array)
+
+        # if the attribute has more than one dimension reshape the array before returning
+        if self.is_1d:
+            return array
+        else:
+            return array.reshape(self.shape)
+
+    def __str__(self):
+        return "Attribute: {}, type: {}, size: {}".format(
+            self.attribute.name, self.type_name, self.shape
+        )
+
+
+def store_named_attribute(
+    obj: bpy.types.Object,
+    data: np.ndarray,
+    name: str,
+    atype: str | AttributeType | None = None,
+    domain: str | DomainType = Domains.POINT,
+    overwrite: bool = True,
+) -> bpy.types.Attribute:
+    """
+    Adds and sets the values of an attribute on the object.
+
+    Parameters
+    ----------
+    obj : bpy.types.Object
+        The Blender object.
+    name : str
+        The name of the attribute.
+    data : np.ndarray
+        The attribute data as a numpy array.
+    atype : str, AttributeType, optional
+        The attribute type to store the data as. One of the AttributeType enums or a string
+        of the same name.
+        'FLOAT_VECTOR', 'FLOAT_COLOR', 'FLOAT4X4', 'QUATERNION', 'FLOAT', 'INT', 'BOOLEAN'
+    domain : str, optional
+        The domain of the attribute. Defaults to 'POINT'. Currently, only 'POINT', 'EDGE',
+        and 'FACE' have been tested.
+ overwrite : bool + Setting to false will create a new attribute if the given name is already an + attribute on the mesh. + + Returns + ------- + bpy.types.Attribute + The added attribute. + """ + + if isinstance(atype, str): + try: + atype = AttributeTypes[atype] + except KeyError: + raise ValueError( + f"Given data type {atype=} does not match any of the possible attribute types: {list(AttributeTypes)=}" + ) + + if atype is None: + atype = guess_atype_from_array(data) + + attribute = obj.data.attributes.get(name) # type: ignore + if not attribute or not overwrite: + attribute = obj.data.attributes.new(name, atype.value.type_name, str(domain)) + + if len(data) != len(attribute.data): + raise AttributeMismatchError( + f"Data length {len(data)}, dimensions {data.shape} does not equal the size of the target domain {domain}, len={len(attribute.data)=}" + ) + + # the 'foreach_set' requires a 1D array, regardless of the shape of the attribute + # so we have to flatten it first + attribute.data.foreach_set(atype.value.value_name, data.reshape(-1)) + + # The updating of data doesn't work 100% of the time (see: + # https://projects.blender.org/blender/blender/issues/118507) so this resetting of a + # single vertex is the current fix. Not great as I can see it breaking when we are + # missing a vertex - but for now we shouldn't be dealing with any situations where this + # is the case For now we will set a single vert to it's own position, which triggers a + # proper refresh of the object data. + try: + obj.data.vertices[0].co = obj.data.vertices[0].co # type: ignore + except AttributeError: + obj.data.update() # type: ignore + + return attribute + + +def evaluate_object(obj: bpy.types.Object): + "Return an object which has the modifiers evaluated." 
+ obj.update_tag() + return obj.evaluated_get(bpy.context.evaluated_depsgraph_get()) + + +def named_attribute( + obj: bpy.types.Object, name="position", evaluate=False +) -> np.ndarray: + """ + Get the named attribute data from the object, optionally evaluating modifiers first. + + Parameters: + object (bpy.types.Object): The Blender object. + name (str, optional): The name of the attribute. Defaults to 'position'. + + Returns: + np.ndarray: The attribute data as a numpy array. + """ + if evaluate: + obj = evaluate_object(obj) + verbose = False + try: + attr = Attribute(obj.data.attributes[name]) + except KeyError: + message = f"The selected attribute '{name}' does not exist on the mesh." + if verbose: + message += f"Possible attributes are: {obj.data.attributes.keys()}" + + raise AttributeError(message) + + return attr.as_array() + + +def remove_named_attribute( + obj: bpy.types.Object, name: str, domain: str | DomainType = Domains.POINT +): + try: + attr = obj.data.attributes[name] + obj.data.attributes.remove(attr) + except KeyError: + raise AttributeError( + f"The selected attribute '{name}' does not exist on the mesh." 
+    )
diff --git a/molecularnodes/bpyd/collection.py b/molecularnodes/bpyd/collection.py
new file mode 100644
index 00000000..3c3b14c3
--- /dev/null
+++ b/molecularnodes/bpyd/collection.py
@@ -0,0 +1,23 @@
+import bpy
+from bpy.types import Collection
+
+
+def create_collection(
+    name: str = "NewCollection", parent: Collection | str | None = None
+) -> Collection:
+    if isinstance(parent, str):
+        try:
+            parent = bpy.data.collections[parent]
+        except KeyError:
+            parent = bpy.data.collections.new(parent)
+            bpy.context.scene.collection.children.link(parent)
+    try:
+        coll = bpy.data.collections[name]
+    except KeyError:
+        coll = bpy.data.collections.new(name)
+        if parent is None:
+            bpy.context.scene.collection.children.link(coll)
+        else:
+            parent.children.link(coll)
+
+    return coll
diff --git a/molecularnodes/bpyd/material.py b/molecularnodes/bpyd/material.py
new file mode 100644
index 00000000..625dbfec
--- /dev/null
+++ b/molecularnodes/bpyd/material.py
@@ -0,0 +1,18 @@
+from bpy.types import Material
+import bpy
+import os
+
+
+# TODO: use DuplicatePrevention when adding material node trees
+def append_from_blend(name: str, filepath: str) -> Material:
+    if not os.path.exists(filepath):
+        raise FileNotFoundError(f"Given file not found: {filepath}")
+    try:
+        return bpy.data.materials[name]
+    except KeyError:
+        bpy.ops.wm.append(
+            directory=os.path.join(filepath, "Material"),
+            filename=name,
+            link=False,
+        )
+        return bpy.data.materials[name]
diff --git a/molecularnodes/bpyd/nodes.py b/molecularnodes/bpyd/nodes.py
new file mode 100644
index 00000000..08760802
--- /dev/null
+++ b/molecularnodes/bpyd/nodes.py
@@ -0,0 +1,171 @@
+import bpy
+from typing import List
+import re
+import time
+import warnings
+
+
+NODE_DUP_SUFFIX = r"\.\d{3}$"
+
+
+class NodeGroupCreationError(Exception):
+    def __init__(self, message):
+        self.message = message
+        super().__init__(self.message)
+
+
+def deduplicate_node_trees(node_trees: List[str]):
+    # Compile the regex pattern for matching a
suffix of a dot followed by 3 numbers + node_duplicate_pattern = re.compile(r"\.\d{3}$") + to_remove: List[bpy.types.GeometryNodeTree] = [] + + for node_tree in node_trees: + # Check if the node tree's name matches the duplicate pattern and is not a "NodeGroup" + for node in node_tree.nodes: + if not ( + hasattr(node, "node_tree") + and node_duplicate_pattern.search(node.node_tree.name) + and "NodeGroup" not in node.node_tree.name + ): + continue + + old_name = node.node_tree.name + # Remove the numeric suffix to get the original name + name_sans = old_name.rsplit(".", 1)[0] + replacement = bpy.data.node_groups.get(name_sans) + if not replacement: + continue + + # print(f"matched {old_name} with {name_sans}") + node.node_tree = replacement + to_remove.append(bpy.data.node_groups[old_name]) + + for tree in to_remove: + try: + # remove the data from the blend file + bpy.data.node_groups.remove(tree) + except ReferenceError: + pass + + +def cleanup_duplicates(purge: bool = False): + # Collect all node trees from node groups, excluding "NodeGroup" named ones + node_trees = [tree for tree in bpy.data.node_groups if "NodeGroup" not in tree.name] + + # Call the deduplication function with the collected node trees + deduplicate_node_trees(node_trees) + + if purge: + # Purge orphan data blocks from the file + bpy.ops.outliner.orphans_purge() + + +class DuplicatePrevention: + "Context manager to cleanup duplicated node trees when appending node groups" + + def __init__(self, timing=False): + self.current_names: List[str] = [] + self.start_time = None + self.timing = timing + + def __enter__(self): + self.current_names = [tree.name for tree in bpy.data.node_groups] + if self.timing: + self.start_time = time.time() + + def __exit__(self, type, value, traceback): + new_trees = [ + tree for tree in bpy.data.node_groups if tree.name not in self.current_names + ] + deduplicate_node_trees(new_trees) + if self.timing: + end_time = time.time() + print(f"De-duplication time: {end_time 
- self.start_time:.2f} seconds") + + +class MaintainConnections: + # capture input and output links, so we can rebuild the links based on name + # and the sockets they were connected to + # as we collect them, remove the links so they aren't automatically connected + # when we change the node_tree for the group + + def __init__(self, node: bpy.types.GeometryNode) -> None: + self.node = node + self.input_links = [] + self.output_links = [] + + def __enter__(self): + "Store all the connections in and out of this node for rebuilding on exit." + self.node_tree = self.node.id_data + + for input in self.node.inputs: + for input_link in input.links: + self.input_links.append((input_link.from_socket, input.name)) + self.node_tree.links.remove(input_link) + + for output in self.node.outputs: + for output_link in output.links: + self.output_links.append((output.name, output_link.to_socket)) + self.node_tree.links.remove(output_link) + + try: + self.material = self.node.inputs["Material"].default_value + except KeyError: + self.material = None + + def __exit__(self, type, value, traceback): + "Rebuild the connections in and out of this node that were stored on entry." 
+ # rebuild the links based on names of the sockets, not their identifiers + link = self.node_tree.links.new + for input_link in self.input_links: + try: + link(input_link[0], self.node.inputs[input_link[1]]) + except KeyError: + pass + for output_link in self.output_links: + try: + link(self.node.outputs[output_link[0]], output_link[1]) + except KeyError: + pass + + # reset all values to tree defaults + tree = self.node.node_tree + for item in tree.interface.items_tree: + if item.item_type == "PANEL": + continue + if item.in_out == "INPUT": + if hasattr(item, "default_value"): + self.node.inputs[item.identifier].default_value = item.default_value + + if self.material: + try: + self.node.inputs["Material"].default_value = self.material + except KeyError: + # the new node doesn't contain a material slot + pass + + +def swap_tree(node: bpy.types.GeometryNode, tree: bpy.types.GeometryNodeTree) -> None: + with MaintainConnections(node): + node.node_tree = tree + node.name = tree.name + + +def append_from_blend( + name: str, filepath: str, link: bool = False +) -> bpy.types.GeometryNodeTree: + "Append a Geometry Nodes node tree from the given .blend file" + try: + return bpy.data.node_groups[name] + except KeyError: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + with DuplicatePrevention(): + bpy.ops.wm.append( + "EXEC_DEFAULT", + directory=filepath, + filename=name, + link=link, + use_recursive=True, + ) + return bpy.data.node_groups[name] diff --git a/molecularnodes/bpyd/object.py b/molecularnodes/bpyd/object.py new file mode 100644 index 00000000..e6f5cd26 --- /dev/null +++ b/molecularnodes/bpyd/object.py @@ -0,0 +1,514 @@ +import bpy +from bpy.types import Object +import numpy as np +from .attribute import ( + AttributeTypes, + AttributeType, + Domains, + DomainType, +) +from . 
import attribute as attr +from .utils import centre +from mathutils import Matrix + + +class ObjectMissingError(Exception): + def __init__(self, message: str): + self.message = message + super().__init__(self.message) + + +class ObjectTracker: + """ + A context manager for tracking new objects in Blender. + + This class provides a way to track new objects that are added to Blender's bpy.data.objects collection. + It stores the current objects when entering the context and provides a method to find new objects that were added when exiting the context. + + Methods + ------- + new_objects() + Returns a list of new objects that were added to bpy.data.objects while in the context. + """ + + def __enter__(self): + """ + Store the current objects and their names when entering the context. + + Returns + ------- + self + The instance of the class. + """ + self.objects = list(bpy.context.scene.objects) + return self + + def __exit__(self, type, value, traceback): + pass + + def new_objects(self): + """ + Find new objects that were added to bpy.data.objects while in the context. + + Use new_objects()[-1] to get the most recently added object. + + Returns + ------- + list + A list of new objects. + """ + obj_names = list([o.name for o in self.objects]) + current_objects = bpy.context.scene.objects + new_objects = [] + for obj in current_objects: + if obj.name not in obj_names: + new_objects.append(obj) + return new_objects + + def latest(self): + """ + Get the most recently added object. + + This method returns the most recently added object to bpy.data.objects while in the context. + + Returns + ------- + Object + The most recently added object. + """ + return self.new_objects()[-1] + + +class BlenderObject: + """ + A convenience class for working with Blender objects + """ + + def __init__(self, obj: Object | None): + """ + Initialize the BlenderObject. + + Parameters + ---------- + obj : Object | None + The Blender object to wrap. 
+ """ + if not isinstance(obj, Object): + raise ValueError(f"{obj} must be a Blender object of type Object") + self._object = obj + + @property + def object(self) -> Object | None: + """ + Get the Blender object. + + Returns + ------- + Object | None + The Blender object, or None if not found. + """ + # If we don't have connection to an object, attempt to re-stablish to a new + # object in the scene with the same UUID. This helps if duplicating / deleting + # objects in the scene, but sometimes Blender just loses reference to the object + # we are working with because we are manually setting the data on the mesh, + # which can wreak havoc on the object database. To protect against this, + # if we have a broken link we just attempt to find a new suitable object for it + try: + # if the connection is broken then trying to the name will raise a connection + # error. If we are loading from a saved session then the object_ref will be + # None and get an AttributeError + self._object.name + return self._object + except (ReferenceError, AttributeError): + for obj in bpy.data.objects: + if obj.mn.uuid == self.uuid: + print( + Warning( + f"Lost connection to object: {self._object}, now connected to {obj}" + ) + ) + self._object = obj + return obj + + return None + + @object.setter + def object(self, value: Object) -> None: + """ + Set the Blender object. + + Parameters + ---------- + value : Object + The Blender object to set. + """ + self._object = value + + def store_named_attribute( + self, + data: np.ndarray, + name: str, + atype: str | AttributeType | None = None, + domain: str | DomainType = Domains.POINT, + ) -> None: + """ + Store a named attribute on the Blender object. + + Parameters + ---------- + data : np.ndarray + The data to be stored as an attribute. + name : str + The name for the attribute. Will overwrite an already existing attribute. + atype : str or AttributeType or None, optional + The attribute type to store the data as. 
Either string or selection from the + AttributeTypes enum. None will attempt to infer the attribute type from the + input array. + domain : str or DomainType, optional + The domain to store the attribute on. Defaults to Domains.POINT. + + Returns + ------- + self + """ + attr.store_named_attribute( + self.object, data=data, name=name, atype=atype, domain=domain + ) + return self + + def remove_named_attribute(self, name: str) -> None: + """ + Remove a named attribute from the object. + + Parameters + ---------- + name : str + The name of the attribute to remove. + """ + attr.remove_named_attribute(self.object, name=name) + + def named_attribute(self, name: str, evaluate: bool = False) -> np.ndarray: + """ + Retrieve a named attribute from the object. + + Optionally, evaluate the object before reading the named attribute + + Parameters + ---------- + name : str + Name of the attribute to get. + evaluate : bool, optional + Whether to evaluate the object before reading the attribute (default is False). + Returns + ------- + np.ndarray + The attribute read from the mesh as a numpy array. + """ + return attr.named_attribute(self.object, name=name, evaluate=evaluate) + + def set_boolean(self, array: np.ndarray, name: str) -> None: + """ + Store a boolean attribute on the Blender object. + + Parameters + ---------- + array : np.ndarray + The boolean data to be stored as an attribute. + name : str + The name for the attribute. + """ + self.store_named_attribute(array, name=name, atype=AttributeTypes.BOOLEAN) + + def evaluate(self): + """ + Evaluate the object and return a new BlenderObject with the evaluated object. + + Returns + ------- + BlenderObject + A new BlenderObject with the evaluated object. 
+ """ + obj = self.object + obj.update_tag() + evluated_obj = obj.evaluated_get(bpy.context.evaluated_depsgraph_get()) + return BlenderObject(evluated_obj) + + def centroid(self, weight: str | np.ndarray | None = None) -> np.ndarray: + """ + Return the centroid, potentially weighted by an attribute. + + If the weight is a string, an attribute of that name is attempted to be accessed + on the mesh. If an array is given that array is used as weights. A value of None + returns just the centroid calculation. + + Parameters + ---------- + weight : str | np.ndarray | None, optional + The weights to apply to the positions when calculating the centroid. Defaults to None. + + Returns + ------- + np.ndarray + A 3-component vector with the calculated centroid. + """ + if isinstance(weight, str): + return centre(self.position, self.named_attribute(weight)) + + if isinstance(weight, np.ndarray): + return centre(self.position, weight) + + return centre(self.position) + + @property + def attributes(self): + """ + Get the attributes of the Blender object. + + Returns + ------- + bpy.types.Attributes + The attributes of the Blender object. + """ + return self.object.data.attributes + + @property + def vertices(self): + """ + Get the vertices of the Blender object. + + Returns + ------- + bpy.types.Vertices + The vertices of the Blender object. + """ + return self.object.data.vertices + + @property + def edges(self): + """ + Get the edges of the Blender object. + + Returns + ------- + bpy.types.Edges + The edges of the Blender object. + """ + return self.object.data.edges + + def transform_origin(self, matrix: Matrix) -> None: + """ + Transform the origin of the Blender object. + + Parameters + ---------- + matrix : Matrix + The transformation matrix to apply to the origin. + """ + self.object.matrix_local = matrix * self.object.matrix_world + + def transform_points(self, matrix: Matrix) -> None: + """ + Transform the points of the Blender object. 
+ + Parameters + ---------- + matrix : Matrix + The transformation matrix to apply to the points. + """ + self.position = self.position * matrix + + @property + def selected(self) -> np.ndarray: + """ + Get the selected vertices of the Blender object. + + Returns + ------- + np.ndarray + The selected vertices of the Blender object. + """ + return self.named_attribute(".select_vert") + + @property + def name(self) -> str: + """ + Get the name of the Blender object. + + Returns + ------- + str + The name of the Blender object. + """ + obj = self.object + if obj is None: + return None + + return obj.name + + @name.setter + def name(self, value: str) -> None: + """ + Set the name of the Blender object. + + Parameters + ---------- + value : str + The name to set for the Blender object. + """ + obj = self.object + if obj is None: + raise ObjectMissingError + obj.name = value + + @property + def position(self) -> np.ndarray: + """ + Get the position of the vertices of the Blender object. + + Returns + ------- + np.ndarray + The position of the vertices of the Blender object. + """ + return self.named_attribute("position") + + @position.setter + def position(self, value: np.ndarray) -> None: + """ + Set the position of the vertices of the Blender object. + + Parameters + ---------- + value : np.ndarray + The position to set for the vertices of the Blender object. + """ + self.store_named_attribute( + value, + name="position", + atype=AttributeTypes.FLOAT_VECTOR, + domain=Domains.POINT, + ) + + def selected_positions(self, mask: np.ndarray | None = None) -> np.ndarray: + """ + Get the positions of the selected vertices, optionally filtered by a mask. + + Parameters + ---------- + mask : np.ndarray | None, optional + The mask to filter the selected vertices. Defaults to None. + + Returns + ------- + np.ndarray + The positions of the selected vertices. 
+ """ + if mask is not None: + return self.position[np.logical_and(self.selected, mask)] + + return self.position[self.selected] + + def list_attributes( + self, evaluate: bool = False, drop_hidden: bool = False + ) -> list | None: + """ + Returns a list of attribute names for the object. + + Parameters + ---------- + evaluate : bool, optional + Whether to first evaluate the modifiers on the object before listing the + available attributes. + drop_hidden : bool, optional + Whether to drop hidden attributes (those starting with a dot). Defaults to False. + + Returns + ------- + list[str] | None + A list of attribute names if the molecule object exists, None otherwise. + """ + if evaluate: + strings = list(self.evaluate().attributes.keys()) + else: + strings = list(self.object.attributes.keys()) + + if not drop_hidden: + return strings + else: + return filter(lambda x: not x.startswith("."), strings) + + def __len__(self) -> int: + """ + Get the number of vertices in the Blender object. + + Returns + ------- + int + The number of vertices in the Blender object. + """ + return len(self.object.data.vertices) + + +def create_object( + vertices: np.ndarray | None = None, + edges: np.ndarray | None = None, + faces: np.ndarray | None = None, + name: str = "NewObject", + collection: bpy.types.Collection | None = None, +) -> Object: + """ + Create a new Blender object and corresponding mesh. + + Vertices are created for each row in the vertices array. If edges and / or faces are created then they are also + initialized but default to None. + + Parameters + ---------- + vertices : np.ndarray, optional + The vertices of the vertices as a numpy array. Defaults to None. + edges : np.ndarray, optional + The edges of the object as a numpy array. Defaults to None. + faces : np.ndarray, optional + The faces of the object as a numpy array. Defaults to None. + name : str, optional + The name of the object. Defaults to 'NewObject'. 
+ collection : bpy.types.Collection, optional + The collection to link the object to. Defaults to None. + + Returns + ------- + Object + The created object. + """ + if vertices is None: + vertices = [] + if edges is None: + edges = [] + if faces is None: + faces = [] + mesh = bpy.data.meshes.new(name) + mesh.from_pydata(vertices=vertices, edges=edges, faces=faces) + obj = bpy.data.objects.new(name, mesh) + if not collection: + collection = bpy.data.collections["Collection"] + collection.objects.link(obj) + return obj + + +def create_bob( + vertices: np.ndarray | None = None, + edges: np.ndarray | None = None, + faces: np.ndarray | None = None, + name: str = "NewObject", + collection: bpy.types.Collection | None = None, +) -> BlenderObject: + "Create an object but return it wrapped as a BlenderObject" + return BlenderObject( + create_object( + vertices=vertices, + edges=edges, + faces=faces, + name=name, + collection=collection, + ) + ) diff --git a/molecularnodes/bpyd/utils.py b/molecularnodes/bpyd/utils.py new file mode 100644 index 00000000..951f88fd --- /dev/null +++ b/molecularnodes/bpyd/utils.py @@ -0,0 +1,46 @@ +import numpy as np + + +def centre(position: np.ndarray, weight: np.ndarray | None = None): + "Calculate the weighted centroid of the vectors" + if weight is None: + return np.mean(position, axis=0) + return np.sum(position * weight.reshape((-1, 1)), axis=0) / np.sum(weight) + + +def lerp(a: np.ndarray, b: np.ndarray, t: float = 0.5) -> np.ndarray: + """ + Linearly interpolate between two values. + + Parameters + ---------- + a : array_like + The starting value. + b : array_like + The ending value. + t : float, optional + The interpolation parameter. Default is 0.5. + + Returns + ------- + array_like + The interpolated value(s). + + Notes + ----- + This function performs linear interpolation between `a` and `b` using the + interpolation parameter `t` such that the result lies between `a` and `b`. 
+ + Examples + -------- + >>> lerp(1, 2, 0.5) + 1.5 + + >>> lerp(3, 7, 0.2) + 3.8 + + >>> lerp([1, 2, 3], [4, 5, 6], 0.5) + array([2.5, 3.5, 4.5]) + + """ + return np.add(a, np.multiply(np.subtract(b, a), t)) diff --git a/molecularnodes/bpyd/vdb.py b/molecularnodes/bpyd/vdb.py new file mode 100644 index 00000000..b43e95cf --- /dev/null +++ b/molecularnodes/bpyd/vdb.py @@ -0,0 +1,36 @@ +import bpy +from .object import ObjectTracker +from .collection import create_collection + + +def import_vdb( + file: str, collection: str | bpy.types.Collection | None = None +) -> bpy.types.Object: + """ + Imports a VDB file as a Blender volume object. + + Parameters + ---------- + file : str + Path to the VDB file. + + Returns + ------- + bpy.types.Object + A Blender object containing the imported volume data. + """ + + # import the volume object + with ObjectTracker() as o: + bpy.ops.object.volume_import(filepath=file, files=[]) + obj = o.latest() + + if collection: + # Move the object to the given collection + initial_collection = obj.users_collection[0] + initial_collection.objects.unlink(obj) + if isinstance(collection, str): + collection = create_collection(collection) + collection.objects.link(obj) + + return obj diff --git a/molecularnodes/entities/density/mrc.py b/molecularnodes/entities/density/mrc.py index 98296f31..7eebe97a 100644 --- a/molecularnodes/entities/density/mrc.py +++ b/molecularnodes/entities/density/mrc.py @@ -1,7 +1,8 @@ from .density import Density import mrcfile -from ...blender import coll, mesh, nodes +from ...blender import coll, nodes +from ... 
import bpyd import bpy import numpy as np import os @@ -42,7 +43,7 @@ def create_object( """ # import and ensure object is at world origin to get corect alignment with # structures - object = mesh.import_vdb(self.file_vdb, collection=coll.mn()) + object = bpyd.import_vdb(self.file_vdb, collection=coll.mn()) object.location = (0, 0, 0) self.object = object object.mn["molecule_type"] = "density" diff --git a/molecularnodes/entities/ensemble/cellpack.py b/molecularnodes/entities/ensemble/cellpack.py index 6a56afdb..6aaf2f95 100644 --- a/molecularnodes/entities/ensemble/cellpack.py +++ b/molecularnodes/entities/ensemble/cellpack.py @@ -8,6 +8,7 @@ from .cif import OldCIF from ..molecule import molecule from ... import blender as bl +from ...bpyd import store_named_attribute, AttributeTypes from ... import color @@ -68,12 +69,11 @@ def _create_object_instances( ) colors = np.tile(color.random_rgb(i), (len(chain_atoms), 1)) - bl.mesh.store_named_attribute( - obj, - name="Color", + store_named_attribute( + obj=obj, data=colors, - data_type="FLOAT_COLOR", - overwrite=True, + name="Color", + atype=AttributeTypes.FLOAT_COLOR, ) if node_setup: diff --git a/molecularnodes/entities/ensemble/star.py b/molecularnodes/entities/ensemble/star.py index d76eedcb..2f195d54 100644 --- a/molecularnodes/entities/ensemble/star.py +++ b/molecularnodes/entities/ensemble/star.py @@ -7,6 +7,8 @@ from PIL import Image from ... import blender as bl +from ...bpyd import AttributeTypes +from ... 
import bpyd from .ensemble import Ensemble @@ -211,45 +213,41 @@ def _update_micrograph_texture(self, *_): self.star_node.inputs["Micrograph"].default_value = image_obj def create_object(self, name="StarFileObject", node_setup=True, world_scale=0.01): - blender_object = bl.mesh.create_object( + bob = bpyd.create_bob( self.positions * world_scale, collection=bl.coll.mn(), name=name ) - blender_object.mn["molecule_type"] = "star" + bob.object.mn["molecule_type"] = "star" # create attribute for every column in the STAR file for col in self.data.columns: col_type = self.data[col].dtype # If col_type is numeric directly add if np.issubdtype(col_type, np.number): - bl.mesh.store_named_attribute( - blender_object, - col, - self.data[col].to_numpy().reshape(-1), - "FLOAT", - "POINT", + bob.store_named_attribute( + name=col, + data=self.data[col].to_numpy().reshape(-1), + atype=AttributeTypes.FLOAT, ) # If col_type is object, convert to category and add integer values - elif col_type == object: + elif isinstance(col_type, object): codes = ( self.data[col].astype("category").cat.codes.to_numpy().reshape(-1) ) - bl.mesh.store_named_attribute( - blender_object, col, codes, "INT", "POINT" + bob.store_named_attribute( + data=codes, name=col, atype=AttributeTypes.INT ) # Add the category names as a property to the blender object - blender_object[f"{col}_categories"] = list( + bob.object[f"{col}_categories"] = list( self.data[col].astype("category").cat.categories ) - blender_object.mn.uuid = self.uuid + bob.object.mn.uuid = self.uuid if node_setup: - bl.nodes.create_starting_nodes_starfile( - blender_object, n_images=self.n_images - ) + bl.nodes.create_starting_nodes_starfile(bob.object, n_images=self.n_images) - blender_object["starfile_path"] = str(self.file_path) - self.object = blender_object + bob.object["starfile_path"] = str(self.file_path) + self.object = bob.object bpy.app.handlers.depsgraph_update_post.append(self._update_micrograph_texture) - return blender_object + 
return self.object diff --git a/molecularnodes/entities/entity.py b/molecularnodes/entities/entity.py index 1e63d261..0cd3f3de 100644 --- a/molecularnodes/entities/entity.py +++ b/molecularnodes/entities/entity.py @@ -1,183 +1,20 @@ from abc import ABCMeta import bpy from uuid import uuid1 -from .. import blender as bl -import warnings -import numpy as np +from ..bpyd import ( + BlenderObject, +) -class ObjectMissingError(Exception): - def __init__(self, message): - self.message = message - super().__init__(self.message) - - -class MolecularEntity(metaclass=ABCMeta): +class MolecularEntity( + BlenderObject, + metaclass=ABCMeta, +): def __init__(self) -> None: self.uuid: str = str(uuid1()) - self._object: bpy.types.Object | None self.type: str = "" + self._object: bpy.types.Object | None @property - def name(self) -> str: - obj = self.object - if obj is None: - return None - - return obj.name - - @name.setter - def name(self, value: str) -> None: - obj = self.object - if obj is None: - raise ObjectMissingError - obj.name = value - - @property - def object(self) -> bpy.types.Object | None: - # If we don't have connection to an object, attempt to re-stablish to a new - # object in the scene with the same UUID. This helps if duplicating / deleting - # objects in the scene, but sometimes Blender just loses reference to the object - # we are working with because we are manually setting the data on the mesh, - # which can wreak havoc on the object database. To protect against this, - # if we have a broken link we just attempt to find a new suitable object for it - try: - # if the connection is broken then trying to the name will raise a connection - # error. 
If we are loading from a saved session then the object_ref will be - # None and get an AttributeError - self._object.name - return self._object - except (ReferenceError, AttributeError): - for obj in bpy.data.objects: - if obj.mn.uuid == self.uuid: - print( - Warning( - f"Lost connection to object: {self._object}, now connected to {obj}" - ) - ) - self._object = obj - return obj - - return None - - @object.setter - def object(self, value): - if isinstance(value, bpy.types.Object) or value is None: - self._object = value - else: - raise TypeError(f"The `object` must be a Blender object, not {value=}") - - def named_attribute(self, name="position", evaluate=False) -> np.ndarray | None: - """ - Get the value of an object for the data molecule. - - Parameters - ---------- - name : str, optional - The name of the attribute. Default is 'position'. - evaluate : bool, optional - Whether to first evaluate all node trees before getting the requsted attribute. - False (default) will sample the underlying atomic geometry, while True will - sample the geometry that is created through the Geometry Nodes tree. - - Returns - ------- - np.ndarray - The value of the attribute. - """ - if self.object is None: - warnings.warn( - "No object yet created. Use `create_object()` to create a corresponding object." - ) - return None - return bl.mesh.named_attribute(self.object, name=name, evaluate=evaluate) - - def set_position(self, positions: np.ndarray) -> None: - "A slightly optimised way to set the positions of the object's mesh" - obj = self.object - attribute = obj.data.attributes["position"] - n_points = len(attribute.data) - if positions.shape != (n_points, 3): - raise AttributeError( - f"Expected an array of dimension {(n_points, 3)} to set the position" - / f"but got {positions.shape=}" - ) - - # actually set the data for the positions - attribute.data.foreach_set("vector", positions.reshape(-1)) - # trigger a depsgraph update. 
The second method is better but bugs out sometimes - # so we try the first method initially - try: - obj.data.vertices[0].co = obj.data.vertices[0].co # type: ignore - except AttributeError: - obj.data.update() # type: ignore - - def set_boolean(self, boolean: np.ndarray, name="boolean") -> None: - self.store_named_attribute(boolean, name=name, data_type="BOOLEAN") - - def store_named_attribute( - self, - data: np.ndarray, - name="NewAttribute", - data_type=None, - domain="POINT", - overwrite=True, - ): - """ - Set an attribute for the molecule. - - Parameters - ---------- - data : np.ndarray - The data to be set as the attribute. Must be of length equal to the length - of the domain. - name : str, optional - The name of the new attribute. Default is 'NewAttribute'. - type : str, optional - If value is None (Default), the data type is inferred. The data type of the - attribute. Possbible values are ('FLOAT_VECTOR', 'FLOAT_COLOR", 'QUATERNION', - 'FLOAT', 'INT', 'BOOLEAN'). - domain : str, optional - The domain of the attribute. Default is 'POINT'. Possible values are - currently ['POINT', 'EDGE', 'FACE', 'SPLINE'] - overwrite : bool, optional - Whether to overwrite an existing attribute with the same name, or create a - new attribute with always a unique name. Default is True. - """ - if not self.object: - warnings.warn( - "No object yet created. Use `create_object()` to create a corresponding object." - ) - return None - bl.mesh.store_named_attribute( - self.object, - name=name, - data=data, - data_type=data_type, - domain=domain, - overwrite=overwrite, - ) - - @classmethod - def list_attributes(cls, evaluate=False) -> list | None: - """ - Returns a list of attribute names for the object. - - Parameters - ---------- - evaluate : bool, optional - Whether to first evaluate the modifiers on the object before listing the - available attributes. - - Returns - ------- - list[str] | None - A list of attribute names if the molecule object exists, None otherwise. 
- """ - if not cls.object: - warnings.warn("No object created") - return None - if evaluate: - return list(bl.mesh.evaluated(cls.object).data.attributes.keys()) - - return list(cls.object.data.attributes.keys()) + def bob(self) -> BlenderObject: + return BlenderObject(self.object) diff --git a/molecularnodes/entities/molecule/molecule.py b/molecularnodes/entities/molecule/molecule.py index e9c76f33..79999f34 100644 --- a/molecularnodes/entities/molecule/molecule.py +++ b/molecularnodes/entities/molecule/molecule.py @@ -13,6 +13,8 @@ from ... import blender as bl from ... import color, data, utils +from ...bpyd import Domains, AttributeTypes +from ... import bpyd from ..entity import MolecularEntity @@ -38,7 +40,7 @@ class Molecule(MolecularEntity, metaclass=ABCMeta): frames : bpy.types.Collection The Blender collection which holds the objects making up the frames to animate. array: np.ndarray: - The numpy array which stores the atomic coordindates and associated attributes. + The numpy array which stores the atomic coordinates and associated attributes. entity_ids : np.ndarray The entity IDs of the molecule. chain_ids : np.ndarray @@ -50,29 +52,51 @@ class Molecule(MolecularEntity, metaclass=ABCMeta): Set an attribute on the object for the molecule. named_attribute(name='position') Get the value of an attribute on the object for the molecule. - create_object(name='NewMolecule', style='spheres', selection=None, build_assembly=False, centre = '', del_solvent=True, collection=None, verbose=False) + create_object(name='NewMolecule', style='spheres', selection=None, build_assembly=False, centre='', del_solvent=True, collection=None, verbose=False) Create a 3D model for the molecule, based on the values from self.array. assemblies(as_array=False) Get the biological assemblies of the molecule. """ def __init__(self, file_path: Union[str, Path, io.BytesIO]): + """ + Initialize the Molecule object. 
+ + Parameters + ---------- + file_path : Union[str, Path, io.BytesIO] + The file path to the file which stores the atomic coordinates. + """ super().__init__() self._parse_filepath(file_path=file_path) self.file: str self.array: np.ndarray - self.frames: Optional[bpy.types.Collection] = None + self.frames: bpy.types.Collection | None = None self.frames_name: str = "" bpy.context.scene.MNSession.molecules[self.uuid] = self @classmethod def _read(self, file_path: Union[Path, io.BytesIO]): - """Initially open the file, ready to extract the required data""" + """ + Initially open the file, ready to extract the required data. + + Parameters + ---------- + file_path : Union[Path, io.BytesIO] + The file path to the file which stores the atomic coordinates. + """ pass def _parse_filepath(self, file_path: Union[Path, str, io.BytesIO]) -> None: - "If this is an actual file resolve the path - if a bytes IO resolve this as well." + """ + If this is an actual file resolve the path - if a bytes IO resolve this as well. + + Parameters + ---------- + file_path : Union[Path, str, io.BytesIO] + The file path to the file which stores the atomic coordinates. + """ if isinstance(file_path, io.BytesIO): self.file = self._read(file_path=file_path) elif isinstance(file_path, io.StringIO): @@ -82,6 +106,14 @@ def _parse_filepath(self, file_path: Union[Path, str, io.BytesIO]) -> None: self.file = self._read(self.file_path) def __len__(self) -> int: + """ + Get the number of atoms in the molecule. + + Returns + ------- + int + The number of atoms in the molecule. + """ if hasattr(self, "object"): if self.object: return len(self.object.data.vertices) @@ -92,6 +124,14 @@ def __len__(self) -> int: @property def n_models(self): + """ + Get the number of models in the molecule. + + Returns + ------- + int + The number of models in the molecule. 
+ """ if isinstance(self.array, struc.AtomArray): return 1 else: @@ -99,31 +139,20 @@ def n_models(self): @property def chain_ids(self) -> Optional[list]: + """ + Get the unique chain IDs of the molecule. + + Returns + ------- + Optional[list] + The unique chain IDs of the molecule, or None if not available. + """ if self.array: if hasattr(self.array, "chain_id"): return np.unique(self.array.chain_id).tolist() return None - def centre(self, centre_type: str = "centroid") -> np.ndarray: - """ - Calculate the centre of mass/geometry of the Molecule object - - :return: np.ndarray of shape (3,) user-defined centroid of all atoms in - the Molecule object - """ - positions = self.named_attribute(name="position") - - if centre_type == "centroid": - return bl.mesh.centre(positions) - elif centre_type == "mass": - mass = self.named_attribute(name="mass") - return bl.mesh.centre_weighted(positions, mass) - else: - raise ValueError( - f"`{centre_type}` not a supported selection of ['centroid', 'mass']" - ) - def create_object( self, name: str = "NewMolecule", @@ -165,17 +194,20 @@ def create_object( behavior. del_solvent : bool, optional Whether to delete solvent molecules. Default is True. + del_hydrogen: bool, optional + Whether to delete hydrogen atoms. Default is False. collection : str, optional The collection to add the model to. Default is None. verbose : bool, optional Whether to print verbose output. Default is False. + color : Optional[str], optional + The color scheme to use for the model. Default is 'common'. Returns ------- bpy.types.Object The created 3D model, as an object in the 3D scene. """ - is_stack = isinstance(self.array, struc.AtomArrayStack) if selection: @@ -241,8 +273,7 @@ def assemblies(self, as_array=False): Parameters ---------- as_array : bool, optional - Whether to return the assemblies as an array of quaternions. - Default is False. + Whether to return the assemblies as an array of quaternions. Default is False. 
Returns ------- @@ -250,7 +281,6 @@ def assemblies(self, as_array=False): The biological assemblies of the molecule, as a dictionary of transformation matrices, or None if no assemblies are available. """ - try: assemblies_info = self._assemblies() except InvalidFileError: @@ -262,6 +292,14 @@ def assemblies(self, as_array=False): return assemblies_info def __repr__(self) -> str: + """ + Get the string representation of the Molecule object. + + Returns + ------- + str + The string representation of the Molecule object. + """ return f"" @@ -293,11 +331,9 @@ def _create_object( def centre_array(atom_array, centre): if centre == "centroid": - atom_array.coord -= bl.mesh.centre(atom_array.coord) + atom_array.coord -= bpyd.centre(atom_array.coord) elif centre == "mass": - atom_array.coord -= bl.mesh.centre_weighted( - array=atom_array.coord, weight=atom_array.mass - ) + atom_array.coord -= bpyd.centre(atom_array.coord, weight=atom_array.mass) if centre in ["mass", "centroid"]: if is_stack: @@ -324,7 +360,7 @@ def centre_array(atom_array, centre): bond_types = bonds_array[:, 2].copy(order="C") # creating the blender object and meshes and everything - obj = bl.mesh.create_object( + bob = bpyd.create_bob( name=name, collection=collection, vertices=array.coord * world_scale, @@ -336,8 +372,11 @@ def centre_array(atom_array, centre): # 'AROMATIC_SINGLE' = 5, 'AROMATIC_DOUBLE' = 6, 'AROMATIC_TRIPLE' = 7 # https://www.biotite-python.org/apidoc/biotite.structure.BondType.html#biotite.structure.BondType if array.bonds: - bl.mesh.store_named_attribute( - obj, name="bond_type", data=bond_types, data_type="INT", domain="EDGE" + bob.store_named_attribute( + data=bond_types, + name="bond_type", + atype=AttributeTypes.INT, + domain=Domains.EDGE, ) # The attributes for the model are initially defined as single-use functions. 
This allows @@ -393,7 +432,7 @@ def att_res_name(): res_nums.append(res_num) counter += 1 - obj["ligands"] = np.unique(other_res) + bob.object["ligands"] = np.unique(other_res) return np.array(res_nums) def att_chain_id(): @@ -623,17 +662,17 @@ def att_sec_struct(): if verbose: start = time.process_time() try: - bl.mesh.store_named_attribute( - obj, - name=att["name"], + bob.store_named_attribute( data=att["value"](), - data_type=att["type"], + name=att["name"], + atype=att["type"], domain=att["domain"], ) if verbose: print(f'Added {att["name"]} after {time.process_time() - start} s') - except: + except Exception as e: if verbose: + print(e) warnings.warn(f"Unable to add attribute: {att['name']}") print( f'Failed adding {att["name"]} after {time.process_time() - start} s' @@ -641,26 +680,20 @@ def att_sec_struct(): coll_frames = None if frames: - coll_frames = bl.coll.frames(obj.name, parent=bl.coll.data()) + coll_frames = bl.coll.frames(bob.name) for i, frame in enumerate(frames): - frame = bl.mesh.create_object( - name=obj.name + "_frame_" + str(i), + frame = bpyd.create_object( + name=bob.name + "_frame_" + str(i), collection=coll_frames, vertices=frame.coord * world_scale, - # vertices=frame.coord * world_scale - centroid ) - # TODO if update_attribute - # bl.mesh.store_named_attribute(attribute) - - # this has started to throw errors for me. I'm not sure why. 
- # mol.mn['molcule_type'] = 'pdb' # add custom properties to the actual blender object, such as number of chains, biological assemblies etc # currently biological assemblies can be problematic to holding off on doing that try: - obj["chain_ids"] = list(np.unique(array.chain_id)) + bob.object["chain_ids"] = list(np.unique(array.chain_id)) except AttributeError: - obj["chain_ids"] = None + bob.object["chain_ids"] = None warnings.warn("No chain information detected.") - return obj, coll_frames + return bob.object, coll_frames diff --git a/molecularnodes/entities/molecule/pdbx.py b/molecularnodes/entities/molecule/pdbx.py index cfc57bd1..32279185 100644 --- a/molecularnodes/entities/molecule/pdbx.py +++ b/molecularnodes/entities/molecule/pdbx.py @@ -1,5 +1,4 @@ import itertools -import warnings import biotite.structure as struc import biotite.structure.io.pdbx as pdbx @@ -17,40 +16,32 @@ def __init__(self, file_path): def entity_ids(self): return self.file.block.get("entity").get("pdbx_description").as_array().tolist() - def _get_entity_id(self, array, file): - chain_ids = file.block["entity_poly"]["pdbx_strand_id"].as_array(str) - - # the chain_ids are an array of individual items np.array(['A,B', 'C', 'D,E,F']) - # which need to be categorised as [1, 1, 2, 3, 3, 3] for their belonging to individual - # entities - - chains = [] - idx = [] - for i, chain_str in enumerate(chain_ids): - for chain in chain_str.split(","): - chains.append(chain) - idx.append(i) + @classmethod + def set_extra_annotations( + cls, + array: struc.AtomArray | struc.AtomArrayStack, + file: pdbx.PDBxFile, + verbose: bool = False, + ) -> None: + extra_annotations = { + "sec_struct": cls._get_secondary_structure, + "entity_id": cls._get_entity_id, + } + for name, func in extra_annotations.items(): + try: + array.set_annotation(name, func(array, file)) + except KeyError as e: + if verbose: + print(f"Unable to add {name} as an attribute, error: {e}") + pass - entity_lookup = dict(zip(chains, idx)) - 
chain_id_int = np.array( - [entity_lookup.get(chain, -1) for chain in array.chain_id], int - ) - return chain_id_int + return array def get_structure( self, extra_fields=["b_factor", "occupancy", "atom_id"], bonds=True ): array = pdbx.get_structure(self.file, extra_fields=extra_fields) - try: - array.set_annotation( - "sec_struct", self._get_secondary_structure(array=array, file=self.file) - ) - except KeyError: - warnings.warn("No secondary structure information.") - try: - array.set_annotation("entity_id", self._get_entity_id(array, self.file)) - except KeyError: - warnings.warn("No entity ID information") + array = self.set_extra_annotations(array, self.file) if not array.bonds and bonds: array.bonds = struc.bonds.connect_via_residue_names( @@ -62,48 +53,8 @@ def get_structure( def _assemblies(self): return CIFAssemblyParser(self.file).get_assemblies() - # # in the cif / BCIF file 3x4 transformation matrices are stored in individual - # # columns, this extracts them and returns them with additional row for scaling, - # # meaning an (n, 4, 4) array is returned, where n is the number of transformations - # # and each is a 4x4 transformaiton matrix - # cat_matrix = self.file.block['pdbx_struct_oper_list'] - # matrices = self._extract_matrices(cat_matrix) - - # # sometimes there will be missing opers / matrices. For example in the - # # 'square.bcif' file, the matrix IDs go all the way up to 18024, but only - # # 18023 matrices are defined. That is becuase matrix 12 is never referenced, so - # # isn't included in teh file. To get around this we have to just get the specific - # # IDs that are defined for the matrices and use that to lookup the correct index - # # in the matrices array. 
- # mat_ids = cat_matrix.get('id').as_array(int) - # mat_lookup = dict(zip(mat_ids, range(len(mat_ids)))) - - # category = self.file.block['pdbx_struct_assembly_gen'] - # ids = category['assembly_id'].as_array(int) - # opers = category['oper_expression'].as_array(str) - # asyms = category['asym_id_list'].as_array() - - # # constructs a dictionary of - # # { - # # '1': ((['A', 'B', C'], [4x4 matrix]), (['A', 'B'], [4x4 matrix])), - # # '2': ((['A', 'B', C'], [4x4 matrix])) - # # } - # # where each entry in the dictionary is a biological assembly, and each dictionary - # # value contains a list of tranasformations which need to be applied. Each entry in - # # the list of transformations is - # # ([chains to be affected], [4x4 transformation matrix]) - # assembly_dic = {} - # for idx, oper, asym in zip(ids, opers, asyms): - # trans = list() - # asym = asym.split(',') - # for op in _parse_opers(oper): - # i = int(op) - # trans.append((asym, matrices[mat_lookup[i]].tolist())) - # assembly_dic[str(idx)] = trans - - # return assembly_dic - - def _extract_matrices(self, category): + @staticmethod + def _extract_matrices(category): matrix_columns = [ "matrix[1][1]", "matrix[1][2]", @@ -129,7 +80,29 @@ def _extract_matrices(self, category): return matrices - def _get_secondary_structure(self, file, array): + @staticmethod + def _get_entity_id(array, file): + chain_ids = file.block["entity_poly"]["pdbx_strand_id"].as_array(str) + + # the chain_ids are an array of individual items np.array(['A,B', 'C', 'D,E,F']) + # which need to be categorised as [1, 1, 2, 3, 3, 3] for their belonging to individual + # entities + + chains = [] + idx = [] + for i, chain_str in enumerate(chain_ids): + for chain in chain_str.split(","): + chains.append(chain) + idx.append(i) + + entity_lookup = dict(zip(chains, idx)) + chain_id_int = np.array( + [entity_lookup.get(chain, -1) for chain in array.chain_id], int + ) + return chain_id_int + + @staticmethod + def _get_secondary_structure(array, file): 
""" Get secondary structure information for the array from the file. diff --git a/molecularnodes/entities/molecule/ui.py b/molecularnodes/entities/molecule/ui.py index f574d7df..74c4113a 100644 --- a/molecularnodes/entities/molecule/ui.py +++ b/molecularnodes/entities/molecule/ui.py @@ -44,16 +44,16 @@ def parse(filepath) -> Molecule: def fetch( - pdb_code, - style="spheres", - centre="", - del_solvent=True, - del_hydrogen=False, - cache_dir=None, - build_assembly=False, + pdb_code: str, + style: str | None="spheres", + centre: str="", + del_solvent: bool=True, + del_hydrogen: bool=False, + cache_dir: str | None=None, + build_assembly: bool=False, database: str = "rcsb", - format="bcif", - color="common", + format: str="bcif", + color: str="common", ) -> Molecule: if build_assembly: centre = "" diff --git a/molecularnodes/entities/trajectory/dna.py b/molecularnodes/entities/trajectory/dna.py index 6c823fa4..36759a29 100644 --- a/molecularnodes/entities/trajectory/dna.py +++ b/molecularnodes/entities/trajectory/dna.py @@ -2,6 +2,8 @@ import bpy from ... import color from ...blender import mesh, coll, nodes +from ... 
import bpyd +from ...bpyd import AttributeTypes bpy.types.Scene.MN_import_oxdna_topology = bpy.props.StringProperty( name="Toplogy", @@ -176,7 +178,7 @@ def read_trajectory(filepath): return np.stack(frames) -def store_named_attributes_to_dna_mol(mol, frame, scale_dna=0.1): +def store_named_attributes_to_dna_mol(obj, frame, scale_dna=0.1): attributes = ("base_vector", "base_normal", "velocity", "angular_velocity") for i, att in enumerate(attributes): col_idx = np.array([3, 4, 5]) + i * 3 @@ -190,7 +192,9 @@ def store_named_attributes_to_dna_mol(mol, frame, scale_dna=0.1): if att != "angular_velocity": data *= scale_dna - mesh.store_named_attribute(mol, att, data, data_type="FLOAT_VECTOR") + bpyd.store_named_attribute( + obj=obj, data=data, name=att, atype=AttributeTypes.FLOAT_VECTOR + ) def toplogy_to_bond_idx_pairs(topology: np.ndarray): @@ -252,7 +256,7 @@ def load(top, traj, name="oxDNA", setup_nodes=True, world_scale=0.01): # creat toplogy object with positions of the first frame, and the bonds from the # topology object - obj = mesh.create_object( + obj = bpyd.create_object( name=name, collection=coll.mn(), vertices=trajectory[0][:, 0:3] * scale_dna, @@ -260,13 +264,17 @@ def load(top, traj, name="oxDNA", setup_nodes=True, world_scale=0.01): ) # adding additional toplogy information from the topology and frames objects - mesh.store_named_attribute(obj, "res_name", topology[:, 1], "INT") - mesh.store_named_attribute(obj, "chain_id", topology[:, 0], "INT") - mesh.store_named_attribute( - obj, - "Color", + bpyd.store_named_attribute( + obj=obj, data=topology[:, 1], name="res_name", atype=AttributeTypes.INT + ) + bpyd.store_named_attribute( + obj=obj, data=topology[:, 0], name="chain_id", atype=AttributeTypes.INT + ) + bpyd.store_named_attribute( + obj=obj, data=color.color_chains_equidistant(topology[:, 0]), - data_type="FLOAT_COLOR", + name="Color", + atype=AttributeTypes.FLOAT_COLOR, ) store_named_attributes_to_dna_mol(obj, trajectory[0], scale_dna=scale_dna) 
@@ -281,11 +289,11 @@ def load(top, traj, name="oxDNA", setup_nodes=True, world_scale=0.01): # create a collection to store all of the frame objects that are part of the trajectory # they will contain all of the possible attributes which can be interpolated betewen # frames such as position, base_vector, base_normal, velocity, angular_velocity - collection = coll.frames(f"{name}_frames", parent=coll.data()) + collection = coll.frames(name) for i, frame in enumerate(trajectory): fill_n = int(np.ceil(np.log10(n_frames))) frame_name = f"{name}_frame_{str(i).zfill(fill_n)}" - frame_obj = mesh.create_object( + frame_obj = bpyd.create_object( frame[:, 0:3] * scale_dna, name=frame_name, collection=collection ) store_named_attributes_to_dna_mol(frame_obj, frame, scale_dna) diff --git a/molecularnodes/entities/trajectory/trajectory.py b/molecularnodes/entities/trajectory/trajectory.py index 928b5b9d..bad87105 100644 --- a/molecularnodes/entities/trajectory/trajectory.py +++ b/molecularnodes/entities/trajectory/trajectory.py @@ -1,4 +1,4 @@ -from typing import Dict, List, Callable +from typing import Dict, Callable import bpy import MDAnalysis as mda @@ -6,14 +6,15 @@ import numpy.typing as npt from ... import data -from ..entity import MolecularEntity, ObjectMissingError -from ...blender import coll, mesh, nodes, path_resolve -from ...utils import lerp, correct_periodic_positions +from ..entity import MolecularEntity +from ...blender import coll, nodes, path_resolve +from ... 
import bpyd +from ...utils import correct_periodic_positions from .selections import Selection, TrajectorySelectionItem class Trajectory(MolecularEntity): - def __init__(self, universe: mda.Universe, world_scale=0.01): + def __init__(self, universe: mda.Universe, world_scale: float = 0.01): super().__init__() self.universe: mda.Universe = universe self.selections: Dict[str, Selection] = {} @@ -43,8 +44,6 @@ def add_selection( ) -> TrajectorySelectionItem: "Adds a new selection with the given name, selection string and selection parameters." obj = self.object - # if obj is None: - # raise ObjectMissingError("Universe contains no object to add seleciton to") obj.mn_trajectory_selections.add() sel = obj.mn_trajectory_selections[-1] @@ -78,7 +77,7 @@ def add_selection_from_atomgroup(self, atomgroup: mda.AtomGroup, name: str = "") def apply_selection(self, selection: Selection): "Set the boolean attribute for this selection on the mesh of the object" - self.set_boolean(name=selection.name, boolean=selection.to_mask()) + self.set_boolean(selection.to_mask(), name=selection.name) @property def subframes(self): @@ -140,41 +139,34 @@ def bool_selection(ag, selection, **kwargs) -> np.ndarray: return np.isin(ag.ix, ag.select_atoms(selection, **kwargs).ix).astype(bool) @property - def positions(self) -> np.ndarray: + def univ_positions(self) -> np.ndarray: return self.atoms.positions * self.world_scale @property - def bonds(self) -> List[List[int]]: + def bonds(self) -> np.ndarray: + # the code to remap indices for a selection was removed as we don't subset the trajectory anymore + # when importing it, everything is imported and the selections just update if hasattr(self.atoms, "bonds"): - bond_indices = self.atoms.bonds.indices - atm_indices = self.atoms.indices - bond_filtering = np.all(np.isin(bond_indices, atm_indices), axis=1) - bond_indices = bond_indices[bond_filtering] - index_map = { - index: i for i, index in enumerate(self.universe.atoms.indices) - } - - bonds = 
[[index_map[bond[0]], index_map[bond[1]]] for bond in bond_indices] + return self.atoms.bonds.indices else: - bonds = [] - return bonds + return None @property - def elements(self) -> List[str]: + def elements(self) -> np.ndarray: + if hasattr(self.atoms, "elements"): + return self.atoms.elements + try: - elements = self.atoms.elements.tolist() - except Exception: - try: - elements = [ - x - if x in data.elements.keys() - else mda.topology.guessers.guess_atom_element(x) - for x in self.atoms.names - ] + guessed_elements = [ + x + if x in data.elements.keys() + else mda.topology.guessers.guess_atom_element(x) + for x in self.atoms.names + ] + return np.array(guessed_elements) - except Exception: - elements = ["X"] * self.atoms.n_atoms - return elements + except Exception: + return np.repeat("X", self.n_atoms) @property def atomic_number(self) -> np.ndarray: @@ -187,7 +179,6 @@ def atomic_number(self) -> np.ndarray: @property def vdw_radii(self) -> np.ndarray: - # pm to Angstrom return ( np.array( [ @@ -195,25 +186,21 @@ def vdw_radii(self) -> np.ndarray: for element in self.elements ] ) - * 0.01 - * self.world_scale + * 0.01 # pm to Angstrom + * self.world_scale # Angstrom to world scale ) @property def mass(self) -> np.ndarray: # units: daltons - try: - masses = np.array([x.mass for x in self.atoms]) - except mda.exceptions.NoDataError: - masses = np.array( - [ - data.elements.get(element, {"standard_mass": 0}).get( - "standard_mass" - ) - for element in self.elements - ] - ) - return masses + if hasattr(self.atoms, "masses"): + return np.array([x.mass for x in self.atoms]) + else: + masses = [ + data.elements.get(element, {"standard_mass": 0}).get("standard_mass") + for element in self.elements + ] + return np.array(masses) @property def res_id(self) -> np.ndarray: @@ -430,15 +417,21 @@ def create_object( subframes: int = 0, # in_memory: bool = False, ): - obj = mesh.create_object( - name=name, collection=coll.mn(), vertices=self.positions, edges=self.bonds + obj 
= bpyd.create_object( + name=name, + collection=coll.mn(), + vertices=self.univ_positions, + edges=self.bonds, ) self.object = obj for att_name, att in self._attributes_2_blender.items(): try: - mesh.store_named_attribute( - obj, att_name, att["value"], att["type"], att["domain"] + self.store_named_attribute( + data=att["value"], + name=att_name, + atype=att["type"], + domain=att["domain"], ) except Exception as e: print(e) @@ -467,7 +460,7 @@ def create_object( def _update_calculations(self): for name, func in self.calculations.items(): try: - self.store_named_attribute(name=name, data=func(self.universe)) + self.store_named_attribute(data=func(self.universe), name=name) except Exception as e: print(e) @@ -531,10 +524,7 @@ def _update_positions(self, frame): universe = self.universe frame_mapping = self.frame_mapping obj = self.object - if obj is None: - raise ObjectMissingError( - "Object is deleted and unable to establish a connection with a new Blender Object." - ) + subframes: int = obj.mn.subframes interpolate: bool = obj.mn.interpolate offset: int = obj.mn.offset @@ -574,11 +564,11 @@ def _update_positions(self, frame): fraction = frame % (subframes + 1) / (subframes + 1) # get the positions for the next frame - positions_a = self.positions + positions_a = self.univ_positions if frame_b < universe.trajectory.n_frames: self.frame = frame_b - positions_b = self.positions + positions_b = self.univ_positions if obj.mn.correct_periodic and self.is_orthorhombic: positions_b = correct_periodic_positions( @@ -588,12 +578,9 @@ def _update_positions(self, frame): ) # interpolate between the two sets of positions - positions = lerp(positions_a, positions_b, t=fraction) + self.position = bpyd.lerp(positions_a, positions_b, t=fraction) else: - positions = self.positions - - # update the positions of the underlying vertices - self.set_position(positions) + self.position = self.univ_positions def __repr__(self): return f" logging.Logger: - """ - Configure and start logging 
to a file. - - Parameters - ---------- - logfile_name : str, optional - The name of the log file. Defaults to 'side-packages-install'. - - Returns - ------- - logging.Logger - A Logger object that can be used to write log messages. - - This function sets up a logging configuration with a specified log file name and logging level. - The log file will be created in the `ADDON_DIR/logs` directory. If the directory - does not exist, it will be created. The function returns a Logger object that can be used to - write log messages. - - """ - # Create the logs directory if it doesn't exist - logs_dir = os.path.join(os.path.abspath(ADDON_DIR), "logs") - os.makedirs(logs_dir, exist_ok=True) - - # Set up logging configuration - logfile_path = os.path.join(logs_dir, f"{logfile_name}.log") - logging.basicConfig(filename=logfile_path, level=logging.INFO) - - # Return logger object - return logging.getLogger() diff --git a/molecularnodes/operators/node_add_buttons.py b/molecularnodes/operators/node_add_buttons.py index c616b82a..4bdb6894 100644 --- a/molecularnodes/operators/node_add_buttons.py +++ b/molecularnodes/operators/node_add_buttons.py @@ -3,6 +3,7 @@ from bpy.props import BoolProperty, EnumProperty, IntProperty, StringProperty from ..blender import nodes +from .. import bpyd from ..ui import node_info @@ -69,17 +70,15 @@ def description(cls, context, properties): return properties.node_description def execute(self, context): - # we use the DuplicatePrevention to cleanup internal node duplication on appending - # as Blender doesn't currently do a great job of reusing datablocks - with nodes.DuplicatePrevention(): - try: - nodes.append(self.node_name, link=self.node_link) - _add_node(self.node_name, context) # , label=self.node_label) - except RuntimeError: - self.report( - {"ERROR"}, - message="Failed to add node. 
Ensure you are not in edit mode.", - ) + try: + nodes.append(self.node_name, link=self.node_link) + _add_node(self.node_name, context) # , label=self.node_label) + except RuntimeError: + self.report( + {"ERROR"}, + message="Failed to add node. Ensure you are not in edit mode.", + ) + return {"CANCELLED"} return {"FINISHED"} @@ -105,7 +104,7 @@ def poll(self, context): def execute(self, context): obj = context.active_object - with nodes.DuplicatePrevention(): + with bpyd.nodes.DuplicatePrevention(): try: if self.inset_node: nodes.assembly_insert(obj) @@ -158,7 +157,7 @@ def execute(self, context): prefix = {"BOOLEAN": "Select", "RGBA": "Color"}[self.dtype] node_name = " ".join([prefix, self.node_name, name]) - with nodes.DuplicatePrevention(): + with bpyd.nodes.DuplicatePrevention(): node_chains = nodes.custom_iswitch( name=node_name, dtype=self.dtype, diff --git a/molecularnodes/ui/menu.py b/molecularnodes/ui/menu.py index 8fc4c8b5..fe6b6578 100644 --- a/molecularnodes/ui/menu.py +++ b/molecularnodes/ui/menu.py @@ -106,6 +106,7 @@ def menu( op.description = f"Choose custom selections for {self.label}" else: raise ValueError(f"Data type currently not supported: {self.dtype}") + # test if the object has the currently tested property to enable operator row.enabled = bool(context.active_object.get(self.property_id)) diff --git a/molecularnodes/ui/panel.py b/molecularnodes/ui/panel.py index e7dc8c74..a04db68f 100644 --- a/molecularnodes/ui/panel.py +++ b/molecularnodes/ui/panel.py @@ -110,13 +110,15 @@ def panel_import(layout, context): chosen_panel[selection](col, scene) -def ui_from_node(layout, node): +def ui_from_node( + layout: bpy.types.UILayout, node: bpy.types.NodeGroup, context: bpy.types.Context +): """ Generate the UI for a particular node, which displays the relevant node inputs for user control in a panel, rather than through the node editor. 
""" col = layout.column(align=True) - ntree = bpy.context.active_object.modifiers["MolecularNodes"].node_group + ntree = context.active_object.modifiers["MolecularNodes"].node_group tree = node.node_tree.interface.items_tree @@ -213,7 +215,7 @@ def panel_object(layout, context): if mol_type == "star": layout.label(text="Ensemble") box = layout.box() - ui_from_node(box, nodes.get_star_node(object)) + ui_from_node(box, nodes.get_star_node(object), context=context) return None diff --git a/molecularnodes/utils.py b/molecularnodes/utils.py index eb2ec6b6..c229a633 100644 --- a/molecularnodes/utils.py +++ b/molecularnodes/utils.py @@ -1,14 +1,23 @@ import os -from pathlib import Path - +import sys import numpy as np + +from pathlib import Path from mathutils import Matrix ADDON_DIR = Path(__file__).resolve().parent MN_DATA_FILE = os.path.join(ADDON_DIR, "assets", "MN_data_file_4.2.blend") -def correct_periodic_1d(value1, value2, boundary): +def add_current_module_to_path(): + path = str(ADDON_DIR.parent) + print(path) + sys.path.append(path) + + +def correct_periodic_1d( + value1: np.ndarray, value2: np.ndarray, boundary: float +) -> np.ndarray: diff = value2 - value1 half = boundary / 2 value2[diff > half] -= boundary @@ -16,7 +25,9 @@ def correct_periodic_1d(value1, value2, boundary): return value2 -def correct_periodic_positions(positions_1, positions_2, dimensions): +def correct_periodic_positions( + positions_1: np.ndarray, positions_2: np.ndarray, dimensions: np.ndarray +) -> np.ndarray: if not np.allclose(dimensions[3:], 90.0): raise ValueError( f"Only works with orthorhombic unitcells, and not dimensions={dimensions}" @@ -29,44 +40,6 @@ def correct_periodic_positions(positions_1, positions_2, dimensions): return final_positions -def lerp(a: np.ndarray, b: np.ndarray, t: float = 0.5) -> np.ndarray: - """ - Linearly interpolate between two values. - - Parameters - ---------- - a : array_like - The starting value. - b : array_like - The ending value. 
- t : float, optional - The interpolation parameter. Default is 0.5. - - Returns - ------- - array_like - The interpolated value(s). - - Notes - ----- - This function performs linear interpolation between `a` and `b` using the - interpolation parameter `t` such that the result lies between `a` and `b`. - - Examples - -------- - >>> lerp(1, 2, 0.5) - 1.5 - - >>> lerp(3, 7, 0.2) - 3.8 - - >>> lerp([1, 2, 3], [4, 5, 6], 0.5) - array([2.5, 3.5, 4.5]) - - """ - return np.add(a, np.multiply(np.subtract(b, a), t)) - - # data types for the np.array that will store per-chain symmetry operations dtype = [ ("assembly_id", int), diff --git a/tests/test_attributes.py b/tests/test_attributes.py index 8f461d49..903cdbdf 100644 --- a/tests/test_attributes.py +++ b/tests/test_attributes.py @@ -1,11 +1,12 @@ -import molecularnodes as mn -import pytest import itertools + import numpy as np +import pytest +import molecularnodes as mn +from .constants import attributes, codes, data_dir from .utils import sample_attribute -from .constants import codes, attributes, data_dir mn._test_register() diff --git a/tests/test_coll.py b/tests/test_coll.py index 07abbacc..c01236ac 100644 --- a/tests/test_coll.py +++ b/tests/test_coll.py @@ -4,9 +4,7 @@ def test_coll(): assert mn.blender.coll.mn().name == "MolecularNodes" assert mn.blender.coll.mn().name == "MolecularNodes" - assert mn.blender.coll.data().name == "MN_data" + assert mn.blender.coll.data().name == ".MN_data" assert mn.blender.coll.cellpack().name == "cellpack_" - assert mn.blender.coll.cellpack(fallback=True).name == "cellpack_" - assert mn.blender.coll.cellpack().name == "cellpack_.001" - assert mn.blender.coll.frames().name == "_frames" - assert mn.blender.coll.frames("4OZS").name == "4OZS_frames" + assert mn.blender.coll.frames().name == ".data__frames" + assert mn.blender.coll.frames("4OZS").name == ".data_4OZS_frames" diff --git a/tests/test_density.py b/tests/test_density.py index 6e470dc5..0069ead5 100644 --- 
a/tests/test_density.py +++ b/tests/test_density.py @@ -29,7 +29,7 @@ def density_file(): def test_density_load(density_file): obj = mn.entities.density.load(density_file).object evaluated = mn.blender.mesh.evaluate_using_mesh(obj) - pos = mn.blender.mesh.named_attribute(evaluated, "position") + pos = mn.bpyd.named_attribute(evaluated, "position") assert len(pos) > 1000 @@ -50,7 +50,7 @@ def test_density_centered(density_file): obj = mn.entities.density.load(density_file, center=True, overwrite=True).object evaluated = mn.blender.mesh.evaluate_using_mesh(obj) - pos = mn.blender.mesh.named_attribute(evaluated, "position") + pos = mn.bpyd.named_attribute(evaluated, "position") assert len(pos) > 1000 @@ -69,7 +69,7 @@ def test_density_invert(density_file): style_node.inputs["Threshold"].default_value = 0.01 evaluated = mn.blender.mesh.evaluate_using_mesh(obj) - pos = mn.blender.mesh.named_attribute(evaluated, "position") + pos = mn.bpyd.named_attribute(evaluated, "position") # At this threshold after inverting we should have a cube the size of the volume assert pos[:, 0].max() > 2.0 assert pos[:, 1].max() > 2.0 diff --git a/tests/test_load.py b/tests/test_load.py index 8e17d178..e300c245 100644 --- a/tests/test_load.py +++ b/tests/test_load.py @@ -64,7 +64,7 @@ def test_download_format(code, format): mol2 = o def verts(object): - return mn.blender.mesh.named_attribute(object, "position") + return mn.bpyd.named_attribute(object, "position") assert np.isclose(verts(mol), verts(mol2)).all() @@ -83,8 +83,8 @@ def test_centring(snapshot_custom: NumpySnapshotExtension, code, centre_method): centre_method. Check the CoG and CoM values against the snapshot file. 
""" mol = mn.entities.fetch(code, centre=centre_method, cache_dir=data_dir) - CoG = mol.centre() - CoM = mol.centre(centre_type="mass") + CoG = mol.centroid() + CoM = mol.centroid(weight="mass") if centre_method == "centroid": assert np.linalg.norm(CoG) < 1e-06 @@ -107,11 +107,9 @@ def test_centring_different(code): for method in centre_methods ] for mol1, mol2 in itertools.combinations(mols, 2): + assert not np.allclose(mol1.centroid(), mol2.centroid()) assert not np.allclose( - mol1.centre(centre_type="centroid"), mol2.centre(centre_type="centroid") - ) - assert not np.allclose( - mol1.centre(centre_type="mass"), mol2.centre(centre_type="mass") + mol1.centroid(weight="mass"), mol2.centroid(weight="mass") ) assert not np.allclose( mol1.named_attribute("position"), mol2.named_attribute("position") diff --git a/tests/test_nodes.py b/tests/test_nodes.py index b36c52c2..b9ecbddf 100644 --- a/tests/test_nodes.py +++ b/tests/test_nodes.py @@ -13,17 +13,6 @@ mn._test_register() -def test_node_name_format(): - assert mn.blender.nodes.format_node_name("Style Cartoon") == "Style Cartoon" - assert ( - mn.blender.nodes.format_node_name("MN_dna_double_helix") == "DNA Double Helix" - ) - assert ( - mn.blender.nodes.format_node_name("MN_topo_vector_angle") - == "Topology Vector Angle" - ) - - def test_get_nodes(): obj = mn.entities.fetch("4ozs", style="spheres", cache_dir=data_dir).object @@ -266,16 +255,14 @@ def test_node_topology(snapshot_custom: NumpySnapshotExtension, code, node_name) group.links.new(output, input) - assert snapshot_custom == mn.blender.mesh.named_attribute( + assert snapshot_custom == mn.bpyd.named_attribute( mol.object, "test_attribute", evaluate=True ) def test_topo_bonds(): - mol = mn.entities.fetch( - "1BNA", del_solvent=True, style=None, cache_dir=data_dir - ).object - group = nodes.get_mod(mol).node_group = nodes.new_group() + mol = mn.entities.fetch("1BNA", del_solvent=True, style=None, cache_dir=data_dir) + group = 
nodes.get_mod(mol.object).node_group = nodes.new_group() # add the node that will break bonds, set the cutoff to 0 node_break = nodes.add_custom(group, "Topology Break Bonds") @@ -283,8 +270,8 @@ def test_topo_bonds(): node_break.inputs["Cutoff"].default_value = 0 # compare the number of edges before and after deleting them with - bonds = mol.data.edges - no_bonds = mn.blender.mesh.evaluated(mol).data.edges + bonds = mol.object.data.edges + no_bonds = mol.evaluate().object.data.edges assert len(bonds) > len(no_bonds) assert len(no_bonds) == 0 @@ -292,5 +279,5 @@ def test_topo_bonds(): # are the same (other attributes will be different, but for now this is good) node_find = nodes.add_custom(group, "Topology Find Bonds") nodes.insert_last_node(group, node=node_find) - bonds_new = mn.blender.mesh.evaluated(mol).data.edges + bonds_new = mol.evaluate().edges assert len(bonds) == len(bonds_new) diff --git a/tests/test_nodes_geometry.py b/tests/test_nodes_geometry.py index 8f74c8a1..e0c23541 100644 --- a/tests/test_nodes_geometry.py +++ b/tests/test_nodes_geometry.py @@ -1,7 +1,8 @@ import numpy as np import molecularnodes as mn -from molecularnodes.blender import mesh, nodes +from molecularnodes.blender import nodes +from molecularnodes import bpyd from .constants import data_dir @@ -26,8 +27,8 @@ def test_centre_on_selection(): chain_id = mol.named_attribute("chain_id") chain_ids = np.unique(chain_id) - old_centres = [mesh.centre(old_pos[chain_id == x]) for x in chain_ids] - new_centres = [mesh.centre(new_pos[chain_id == x]) for x in chain_ids] + old_centres = [bpyd.centre(old_pos[chain_id == x]) for x in chain_ids] + new_centres = [bpyd.centre(new_pos[chain_id == x]) for x in chain_ids] assert not np.allclose(old_centres, new_centres) assert np.allclose( diff --git a/tests/test_obj.py b/tests/test_obj.py index f340e2e1..c98925ee 100644 --- a/tests/test_obj.py +++ b/tests/test_obj.py @@ -1,7 +1,7 @@ -import bpy import numpy as np import molecularnodes as mn from 
molecularnodes.blender import mesh +from molecularnodes import bpyd from .constants import data_dir mn.register() @@ -13,7 +13,7 @@ def test_creat_obj(): locations = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0]] bonds = [(0, 1), (1, 2), (2, 0)] name = "MyMesh" - my_object = mesh.create_object(locations, bonds, name=name) + my_object = bpyd.create_object(locations, bonds, name=name) assert len(my_object.data.vertices) == 3 assert my_object.name == name @@ -25,9 +25,7 @@ def test_set_position(): pos_a = mol.named_attribute("position") - mol.store_named_attribute( - data=mol.named_attribute("position") + 10, name="position" - ) + mol.store_named_attribute(mol.named_attribute("position") + 10, name="position") pos_b = mol.named_attribute("position") print(f"{pos_a=}") @@ -38,23 +36,25 @@ def test_set_position(): def test_eval_mesh(): - a = mesh.create_object(np.zeros((3, 3))) + a = bpyd.create_object(np.zeros((3, 3))) assert len(a.data.vertices) == 3 - b = mesh.create_object(np.zeros((5, 3))) + b = bpyd.create_object(np.zeros((5, 3))) assert len(b.data.vertices) == 5 assert len(mesh.evaluate_using_mesh(b).data.vertices) == 5 def test_matrix_read_write(): - obj = mesh.create_object(np.zeros((5, 3))) + obj = bpyd.create_object(np.zeros((5, 3))) arr = np.array((5, 4, 4), float) arr = np.random.rand(5, 4, 4) - mesh.store_named_attribute(obj, "test_matrix", arr, "FLOAT4X4") + bpyd.store_named_attribute( + obj=obj, data=arr, name="test_matrix", atype=bpyd.AttributeTypes.FLOAT4X4 + ) - assert np.allclose(mesh.named_attribute(obj, "test_matrix"), arr) + assert np.allclose(bpyd.named_attribute(obj, "test_matrix"), arr) arr2 = np.random.rand(5, 4, 4) - mesh.store_named_attribute(obj, "test_matrix2", arr2) + bpyd.store_named_attribute(obj=obj, data=arr2, name="test_matrix2") - assert not np.allclose(mesh.named_attribute(obj, "test_matrix2"), arr) + assert not np.allclose(bpyd.named_attribute(obj, "test_matrix2"), arr) diff --git a/tests/test_ops.py b/tests/test_ops.py index 
100473fd..a05b7cef 100644 --- a/tests/test_ops.py +++ b/tests/test_ops.py @@ -3,7 +3,7 @@ import numpy as np import molecularnodes as mn -from molecularnodes.blender.mesh import ObjectTracker, named_attribute +from molecularnodes.bpyd import ObjectTracker, named_attribute from .utils import sample_attribute, NumpySnapshotExtension from .constants import data_dir, codes, attributes diff --git a/tests/test_select.py b/tests/test_select.py index 56469b75..ffe5ab0e 100644 --- a/tests/test_select.py +++ b/tests/test_select.py @@ -1,6 +1,5 @@ -import molecularnodes as mn +from molecularnodes import bpyd from molecularnodes.blender import nodes -import bpy import numpy as np import pytest @@ -12,12 +11,6 @@ def create_debug_group(name="MolecularNodesDebugGroup"): return group -def evaluate(object): - object.update_tag() - dg = bpy.context.evaluated_depsgraph_get() - return object.evaluated_get(dg) - - custom_selections = [ ("1, 3, 5-7", np.array((1, 3, 5, 6, 7))), ("5, 9-20", np.append(5, np.arange(9, 21))), @@ -28,10 +21,13 @@ def evaluate(object): @pytest.mark.parametrize("selection", custom_selections) def test_select_multiple_residues(selection): n_atoms = 100 - object = mn.blender.mesh.create_object(np.zeros((n_atoms, 3))) - mn.blender.mesh.store_named_attribute(object, "res_id", np.arange(n_atoms) + 1) + bob = bpyd.create_bob(np.zeros((n_atoms, 3))) + bob.store_named_attribute( + data=np.arange(n_atoms) + 1, + name="res_id", + ) - mod = nodes.get_mod(object) + mod = nodes.get_mod(bob.object) group = nodes.new_group(fallback=False) mod.node_group = group sep = group.nodes.new("GeometryNodeSeparateGeometry") @@ -41,9 +37,6 @@ def test_select_multiple_residues(selection): node_sel = nodes.add_custom(group, node_sel_group.name) group.links.new(node_sel.outputs["Selection"], sep.inputs["Selection"]) - vertices_count = len(mn.blender.mesh.evaluated(object).data.vertices) + vertices_count = len(bob.evaluate()) assert vertices_count == len(selection[1]) - assert ( - 
mn.blender.mesh.named_attribute(mn.blender.mesh.evaluated(object), "res_id") - == selection[1] - ).all() + assert (bob.evaluate().named_attribute("res_id") == selection[1]).all() diff --git a/tests/test_trajectory.py b/tests/test_trajectory.py index f76eecad..b1320dbd 100644 --- a/tests/test_trajectory.py +++ b/tests/test_trajectory.py @@ -2,12 +2,11 @@ import os import pytest import molecularnodes as mn -from molecularnodes.blender.mesh import named_attribute import MDAnalysis as mda import numpy as np from .constants import data_dir -from .utils import sample_attribute, NumpySnapshotExtension +from .utils import NumpySnapshotExtension mn._test_register() @@ -139,7 +138,7 @@ def test_subframes(self, Trajectory, interpolate): if interpolate: # now using subframes and having interpolate=True there should be a difference assert not np.allclose(verts_b, verts_c) - assert np.allclose(verts_c, mn.utils.lerp(verts_a, verts_b, t=fraction)) + assert np.allclose(verts_c, mn.bpyd.lerp(verts_a, verts_b, t=fraction)) else: # without using interopolation, the subframes means it should default back # to the previous best selected frame diff --git a/tests/utils.py b/tests/utils.py index 1f2e2a85..4de04317 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,11 +1,8 @@ import bpy - -# from .conftest import molecularnodes as mn import molecularnodes as mn import numpy as np import random -# import pathlib from syrupy.extensions.amber import AmberSnapshotExtension @@ -64,8 +61,8 @@ def sample_attribute( random.seed(seed) if error: - attribute = mn.blender.mesh.named_attribute( - object, attribute, evaluate=evaluate + attribute = mn.bpyd.named_attribute( + obj=object, name=attribute, evaluate=evaluate ) length = len(attribute) @@ -80,8 +77,8 @@ def sample_attribute( return attribute[idx, :] else: try: - attribute = mn.blender.mesh.named_attribute( - object=object, name=attribute, evaluate=evaluate + attribute = mn.bpyd.named_attribute( + obj=object, name=attribute, 
evaluate=evaluate ) length = len(attribute)