diff --git a/Samples/Flows/Modify_DailyModelMaintenance/_Script/Post_DailyModelMaintenance.py b/Samples/Flows/Modify_DailyModelMaintenance/_Script/Post_DailyModelMaintenance.py index d6bce303..86c8562c 100644 --- a/Samples/Flows/Modify_DailyModelMaintenance/_Script/Post_DailyModelMaintenance.py +++ b/Samples/Flows/Modify_DailyModelMaintenance/_Script/Post_DailyModelMaintenance.py @@ -43,7 +43,7 @@ # import System import sys # required to return an exit code import os - +from csv import QUOTE_MINIMAL # import common library import settings as settings # sets up all commonly used variables and path locations! @@ -102,7 +102,12 @@ def merge_files(): """ Appends temp report files to log files. Returns a list of files where that log file did not exist or which failed to append to data log file - :return: List of files + :param file_name: file name without path and extension + :type file_name: str + :param filter: file name ends on: filter + :type filter: str + + :return: List of files as fully qualified file names which failed to append to data log file :rtype: [str] """ @@ -117,7 +122,11 @@ def merge_files(): output("Need to create data file: {}".format(data_file_name)) try: write_report_data_as_csv( - file_name=data_file_name, header=rFns.LOG_FILE_HEADER, data=[] + file_name=data_file_name, + header=rFns.LOG_FILE_HEADER, + data=[], + enforce_ascii=True, + quoting=QUOTE_MINIMAL, ) except Exception as e: output( @@ -139,45 +148,67 @@ def merge_files(): file_without_ext = get_file_name_without_ext(file_match) # append single temp file to data log file - flag_append = append_to_file(data_file_name, file_match, True) - if flag_append: + result_append = append_to_file(data_file_name, file_match, True) + if result_append.status: output("Appended: {} to: {}".format(file_without_ext, log_file_name)) else: output( - "Failed to append: {} to: {}".format( - file_without_ext, log_file_name + "Failed to append: {} to: {} with message: {}".format( + file_without_ext, log_file_name, 
result_append.message ) ) failed_files.append(file_match) return failed_files -def append_files( - folder_path, file_prefix, file_suffix, file_extension, out_put_file_name +def append_files_wrapper( + folder_path, file_prefix, file_suffix, file_extension, output_file_name, **kwargs ): """ DuHast append file wrapper... Used to append warnings to warnings report + + :param folder_path: Directory path where the files are located + :type folder_path: str + :param file_prefix: The file prefix common between files to be combined + :type file_prefix: str + :param file_suffix:The file suffix common between files to be combined + :type file_suffix: str + :param file_extension: The file extension of the files to be combined + :type file_extension: str + :param output_file_name: The name of the file to be created + :type output_file_name: str + """ + file_list = get_files_single_directory( folder_path, file_prefix, file_suffix, file_extension ) + # check if any files were found in the directory + if len(file_list) == 0: + output( + "No files found with prefix: {} suffix: {} extension: {}".format( + file_prefix, file_suffix, file_extension + ) + ) + return + # build fully qualified out put file name - full_out_file_name = os.path.join(settings.OUTPUT_FOLDER, out_put_file_name) + full_out_file_name = os.path.join(settings.OUTPUT_FOLDER, output_file_name) for file in file_list: - append_flag = append_to_file( + append_result = append_to_file( source_file=full_out_file_name, append_file=file, ignore_first_row=True ) output( "...appended {} to {} with status [{}]".format( - file, full_out_file_name, append_flag + file, full_out_file_name, append_result.status ) ) -def combine_csv_files(folder_path, file_prefix, file_suffix, file_extension, out_put_file_name): +def combine_csv_files_wrapper(folder_path, file_prefix, file_suffix, file_extension, output_file_name, overwrite_existing, **kwargs): """ Combines csv files into a single csv file with header independent of the files being 
combined. @@ -189,22 +220,57 @@ def combine_csv_files(folder_path, file_prefix, file_suffix, file_extension, out :type file_suffix: str :param file_extension: The file extension of the files to be combined :type file_extension: str - :param out_put_file_name: The name of the file to be created - :type out_put_file_name: str + :param output_file_name: The name of the file to be created + :type output_file_name: str """ - combine_files_csv_header_independent( + try: + combine_files_csv_header_independent( + folder_path=folder_path, + file_prefix=file_prefix, + file_suffix=file_suffix, + file_extension=file_extension, + output_file_name=output_file_name, + overwrite_existing=overwrite_existing, + ) + except Exception as e: + output( + "Failed to combine files {} with exception: [{}]".format(file_suffix, e) + ) + +def combine_files_wrapper(folder_path,file_prefix,file_suffix,file_extension,output_file_name, **kwargs): + """ + Combines files into a single file with a common header. + + :param folder_path: Directory path where the files are located + :type folder_path: str + :param file_prefix: The file prefix common between files to be combined + :type file_prefix: str + :param file_suffix:The file suffix common between files to be combined + :type file_suffix: str + :param file_extension: The file extension of the files to be combined + :type file_extension: str + :param output_file_name: The name of the file to be created + :type output_file_name: str + """ + + result_combine = combine_files( folder_path=folder_path, file_prefix=file_prefix, file_suffix=file_suffix, file_extension=file_extension, - out_put_file_name=out_put_file_name, - overwrite_existing=True, + output_file_name=output_file_name, ) - + output( + "...combined {} to {} with status [{}]".format( + file_suffix, output_file_name, result_combine.status + ) + ) + + def combine_data_files(): """ - Combines varies report files which are created per Revit project file into a single text file + Combines varies 
report files which are created per Revit project file into a single text file. """ for file_to_combine in FILE_DATA_TO_COMBINE: output("Combining {} report files.".format(file_to_combine[0])) @@ -213,7 +279,7 @@ def combine_data_files(): file_prefix="", file_suffix=file_to_combine[0], file_extension=settings.REPORT_FILE_NAME_EXTENSION, - out_put_file_name=file_to_combine[1], + output_file_name=file_to_combine[1], overwrite_existing=True, # make sure previous files are overwritten ) @@ -223,32 +289,32 @@ def combine_data_files(): [ settings.REPORT_EXTENSION_SHEETS_SHORT, settings.COMBINED_REPORT_NAME_SHEETS_SHORT, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_SHEETS, settings.COMBINED_REPORT_NAME_SHEETS, - combine_csv_files, + combine_csv_files_wrapper, ], [ settings.REPORT_EXTENSION_SHARED_PARAMETERS, settings.COMBINED_REPORT_NAME_SHARED_PARAMETERS, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_GRIDS, settings.COMBINED_REPORT_NAME_GRIDS, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_LEVELS, settings.COMBINED_REPORT_NAME_LEVELS, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_WORKSETS, settings.COMBINED_REPORT_NAME_WORKSETS, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_GEO_DATA, @@ -258,37 +324,37 @@ def combine_data_files(): [ settings.REPORT_EXTENSION_FAMILIES, settings.COMBINED_REPORT_NAME_FAMILIES, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_MARKED_VIEWS, settings.COMBINED_REPORT_NAME_MARKED_VIEWS, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_WALL_TYPES, settings.COMBINED_REPORT_NAME_WALL_TYPES, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_VIEWS, settings.COMBINED_REPORT_NAME_VIEWS, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_CAD_LINKS, settings.COMBINED_REPORT_NAME_CAD_LINKS, - combine_files, + combine_files_wrapper, 
], [ settings.REPORT_EXTENSION_REVIT_LINKS, settings.COMBINED_REPORT_NAME_REVIT_LINKS, - combine_files, + combine_files_wrapper, ], [ settings.REPORT_EXTENSION_WARNING_TYPES, settings.COMBINED_REPORT_NAME_WARNING_TYPES, - append_files, + append_files_wrapper, ], ] @@ -298,12 +364,14 @@ def combine_data_files(): exit_code = 0 try: + # merge revit model health data files into overall .log file failed_files_ = merge_files() except Exception as e: output("Failed to merge files: [{}]".format(e)) exit_code = 1 try: + # combine data files per project file and data point into single data files per data point output("Combining report files:") combine_data_files() except Exception as e: @@ -311,6 +379,7 @@ def combine_data_files(): exit_code = 1 try: + # create view template hash table files output("Creating view template hash table:") combine_vt_data_result = combine_vt_reports(settings.OUTPUT_FOLDER) output( @@ -324,6 +393,9 @@ def combine_data_files(): try: + # convert files into parquet file format + # this required python 3.10or higher and cant be run in the same post process script as the other tasks + # TODO: move into separate script output("Converting view template hash table files to parquet file format:") convert_to_parquet_result = convert_vt_reports_to_parquet(settings.OUTPUT_FOLDER) output( diff --git a/Samples/Flows/Post_CombineReports.py b/Samples/Flows/Post_CombineReports.py index ff3694fd..f390884e 100644 --- a/Samples/Flows/Post_CombineReports.py +++ b/Samples/Flows/Post_CombineReports.py @@ -109,7 +109,7 @@ def output(message = ''): DATE_STAMP = dateStamp.get_file_date_stamp() # combine report files based on: -fileCombine.combine_files( +combine_status_cad = fileCombine.combine_files( ROOT_PATH, # - part report location DATE_STAMP, # - part report prefix ( same date stamp as current) '_CAD', # - part report file name suffix @@ -117,10 +117,10 @@ def output(message = ''): DATE_STAMP + '_CAD_Links_summary.txt' # - combined report file name in same location 
as part reports ) # notify users -output('Writing summary Data.... finished: {}_CAD_Links_summary.txt'.format(DATE_STAMP)) +output('Writing summary Data.... finished:[{}] {}_CAD_Links_summary.txt'.format(combine_status_cad.status, DATE_STAMP)) # combine report files based on: -fileCombine.combine_files( +combine_status_revit = fileCombine.combine_files( ROOT_PATH, # - part report location DATE_STAMP, # - part report prefix ( same date stamp as current) '_RVT', # - part report file name suffix @@ -128,4 +128,4 @@ def output(message = ''): DATE_STAMP + '_RVT_Links_summary.txt' # - combined report file name in same location as part reports ) # notify user -output('Writing summary Data.... finished: {}_RVT_Links_summary.txt'.format(DATE_STAMP)) +output('Writing summary Data.... finished:[{}] {}_RVT_Links_summary.txt'.format(combine_status_revit.status, DATE_STAMP)) diff --git a/Samples/Flows/TheChain/_01_ModifyFamilyChange/_Script/Post_ModifyLibraryFamily.py b/Samples/Flows/TheChain/_01_ModifyFamilyChange/_Script/Post_ModifyLibraryFamily.py index 7682bf98..d1089c5a 100644 --- a/Samples/Flows/TheChain/_01_ModifyFamilyChange/_Script/Post_ModifyLibraryFamily.py +++ b/Samples/Flows/TheChain/_01_ModifyFamilyChange/_Script/Post_ModifyLibraryFamily.py @@ -126,7 +126,7 @@ def delete_temp_files(keep_files): def combine_data_files(): """ - Combines varies report files into single text file. + Combines varies report files into single csv text file. Files are filter based on FILE_DATA_TO_COMBINE list. """ @@ -134,9 +134,12 @@ def combine_data_files(): for to_combine in FILE_DATA_TO_COMBINE: output("Combining {} report files.".format(to_combine[0])) # combine files - combine_files( + combine_result = combine_files( settings.WORKING_DIRECTORY, "", to_combine[0], ".temp", to_combine[1] ) + output("Combined report files. 
[{}]".format(combine_result.status)) + if not combine_result.status: + output("Combined report file: {}".format(combine_result.message)) def move_files(): diff --git a/Samples/Flows/TheChain/_01_X_RenameFamilies/_Script/Post_ModifyLibraryFamily.py b/Samples/Flows/TheChain/_01_X_RenameFamilies/_Script/Post_ModifyLibraryFamily.py index 2e4e46b9..51af12d2 100644 --- a/Samples/Flows/TheChain/_01_X_RenameFamilies/_Script/Post_ModifyLibraryFamily.py +++ b/Samples/Flows/TheChain/_01_X_RenameFamilies/_Script/Post_ModifyLibraryFamily.py @@ -125,7 +125,7 @@ def delete_temp_files(keep_files): def combine_data_files(): """ - Combines varies report files into single text file. + Combines varies report files into single csv text file. Files are filter based on FILE_DATA_TO_COMBINE list. """ @@ -133,13 +133,16 @@ def combine_data_files(): for to_combine in settings.FILE_DATA_TO_COMBINE: output("Combining {} report files.".format(to_combine[0])) # combine files - combine_files( + combine_result = combine_files( settings.WORKING_DIRECTORY, "", to_combine[0], settings.TEMP_FILE_EXTENSION, to_combine[1], ) + output("Combined report files. [{}]".format(combine_result.status)) + if not combine_result.status: + output("Combined report file: {}".format(combine_result.message)) def move_files(): diff --git a/Samples/Flows/TheChain/_01_Y_ChangeFamilyCategory/_Script/Post_ChangeFamilyCategory.py b/Samples/Flows/TheChain/_01_Y_ChangeFamilyCategory/_Script/Post_ChangeFamilyCategory.py index 2a89d8c6..444fe3b7 100644 --- a/Samples/Flows/TheChain/_01_Y_ChangeFamilyCategory/_Script/Post_ChangeFamilyCategory.py +++ b/Samples/Flows/TheChain/_01_Y_ChangeFamilyCategory/_Script/Post_ChangeFamilyCategory.py @@ -130,7 +130,7 @@ def delete_temp_files(keep_files): def combine_data_files(): """ - Combines varies report files into single text file. + Combines varies report files into single csv text file. Files are filter based on FILE_DATA_TO_COMBINE list. 
""" @@ -138,9 +138,14 @@ def combine_data_files(): for to_combine in FILE_DATA_TO_COMBINE: output("Combining {} report files.".format(to_combine[0])) # combine files - combine_files( + combine_result = combine_files( settings.WORKING_DIRECTORY, "", to_combine[0], ".temp", to_combine[1] ) + output("Combined report files. [{}]".format(combine_result.status)) + if not combine_result.status: + output("Combined report file: {}".format(combine_result.message)) + + def move_files(): diff --git a/Samples/Flows/TheChain/_01_Z_ChangeFamilySubCategory/_Script/Post_ChangeFamilySubCategory.py b/Samples/Flows/TheChain/_01_Z_ChangeFamilySubCategory/_Script/Post_ChangeFamilySubCategory.py index 2a89d8c6..e207828b 100644 --- a/Samples/Flows/TheChain/_01_Z_ChangeFamilySubCategory/_Script/Post_ChangeFamilySubCategory.py +++ b/Samples/Flows/TheChain/_01_Z_ChangeFamilySubCategory/_Script/Post_ChangeFamilySubCategory.py @@ -130,7 +130,7 @@ def delete_temp_files(keep_files): def combine_data_files(): """ - Combines varies report files into single text file. + Combines varies report files into single csv text file. Files are filter based on FILE_DATA_TO_COMBINE list. """ @@ -138,9 +138,13 @@ def combine_data_files(): for to_combine in FILE_DATA_TO_COMBINE: output("Combining {} report files.".format(to_combine[0])) # combine files - combine_files( + combine_result = combine_files( settings.WORKING_DIRECTORY, "", to_combine[0], ".temp", to_combine[1] ) + output("Combined report files. 
[{}]".format(combine_result.status)) + if not combine_result.status: + output("Combined report file: {}".format(combine_result.message)) + def move_files(): diff --git a/Samples/Flows/TheChain/_02_ModifyFamilyLibraryReloadAdvanced/_Script/Post_ModifyLibraryFamilyReload.py b/Samples/Flows/TheChain/_02_ModifyFamilyLibraryReloadAdvanced/_Script/Post_ModifyLibraryFamilyReload.py index 4b121824..e06f8e1d 100644 --- a/Samples/Flows/TheChain/_02_ModifyFamilyLibraryReloadAdvanced/_Script/Post_ModifyLibraryFamilyReload.py +++ b/Samples/Flows/TheChain/_02_ModifyFamilyLibraryReloadAdvanced/_Script/Post_ModifyLibraryFamilyReload.py @@ -141,9 +141,13 @@ def combine_data_files(): for to_combine in FILE_DATA_TO_COMBINE: output("Combining {} report files.".format(to_combine[0])) # combine files - combine_files( + combine_result = combine_files( settings.WORKING_DIRECTORY, "", to_combine[0], ".temp", to_combine[1] ) + output("Combined report files. [{}]".format(combine_result.status)) + if not combine_result.status: + output("Combined report file: {}".format(combine_result.message)) + def move_files(): diff --git a/Samples/pyRevit/families_reload/ViewModels/FamiliesSelectionViewModel.py b/Samples/pyRevit/families_reload/ViewModels/FamiliesSelectionViewModel.py index 578ba61d..8521156e 100644 --- a/Samples/pyRevit/families_reload/ViewModels/FamiliesSelectionViewModel.py +++ b/Samples/pyRevit/families_reload/ViewModels/FamiliesSelectionViewModel.py @@ -4,13 +4,15 @@ from duHast.UI.Objects.WPF.ViewModels.ViewModelBase import ViewModelBase from duHast.UI.Objects.WPF.Commands.RelayCommand import RelayCommand +from duHast.UI.Objects.WPF.ViewModels.FilterItem import FilterItem from duHast.Utilities.files_get import get_files_from_directory_walker_with_filters_simple from System.Collections.ObjectModel import ObservableCollection -from System.Windows.Data import CollectionViewSource +from System.Windows.Data import CollectionViewSource, PropertyGroupDescription from System.ComponentModel 
import ListSortDirection, SortDescription from ViewModels.FamilyViewModel import FamilyViewModel +#from ViewModels.FilterItem import FilterItem from Commands.ReloadfamiliesCommand import ReloadFamiliesCommand from Objects.match_status_names import MatchStatusNames @@ -21,12 +23,13 @@ def __init__(self, revit_model, navigation_service): super(FamiliesSelectionViewModel, self).__init__() # properties + # the collection of families to be displayed in the view self._families = ObservableCollection[FamilyViewModel]() - self._families_view = CollectionViewSource.GetDefaultView(self._families) + + # the command used to sort when the user clicks on the column headers self._sort_command = RelayCommand(self.sort_families) - # properties - self._families_filtered = ObservableCollection[FamilyViewModel]() + # the revit wpf model object containing the settings and families to be displayed self._revit_model = revit_model # commands @@ -37,78 +40,151 @@ def __init__(self, revit_model, navigation_service): execute=self.close_window ) - # add room reservations to view model + # set filter lists for column filters + self._unique_match_statuses = ObservableCollection[FilterItem]() + self._unique_categories = ObservableCollection[FilterItem]() + self._unique_names = ObservableCollection[FilterItem]() + self._unique_shared_statuses = ObservableCollection[FilterItem]() + + # add families to view model self.update_families() - + + # set unique values for the column context menu filters + self.set_unique_values() + # Set initial sort state self._current_sort_column = "FamilyName" self._current_sort_direction = ListSortDirection.Ascending self._families_view.SortDescriptions.Add(SortDescription(self._current_sort_column, self._current_sort_direction)) + + + @property + def UniqueFamilyNames(self): + """ + The collection of unique family names to be displayed in the column header context filter menu. 
+ """ + return self._unique_names + + @property + def UniqueMatchStatuses(self): + """ + The collection of unique match statuses to be displayed in the column header context filter menu. + """ + return self._unique_match_statuses + + @property + def UniqueCategories(self): + """ + The collection of unique categories to be displayed in the column header context filter menu. + """ + return self._unique_categories + + @property + def UniqueSharedStatuses(self): + """ + The collection of unique shared statuses to be displayed in the column header context filter menu. + """ + return self._unique_shared_statuses @property def FamiliesView(self): + """ + The collection view of the families collection. + """ return self._families_view @property def SortCommand(self): + """ + The command used to sort the collection view of the families collection. + """ return self._sort_command - def sort_families(self, sort_by): - current_sort = None - if self._families_view.SortDescriptions.Count > 0: - current_sort = self._families_view.SortDescriptions[0] - - direction = ListSortDirection.Ascending - if current_sort and current_sort.PropertyName == sort_by: - if current_sort.Direction == ListSortDirection.Ascending: - direction = ListSortDirection.Descending - else: - direction = ListSortDirection.Ascending - - self._families_view.SortDescriptions.Clear() - self._families_view.SortDescriptions.Add(SortDescription(sort_by, direction)) - self._families_view.Refresh() - - # Update current sort column and direction - self._current_sort_column = sort_by - self._current_sort_direction = direction - @property def LibraryPath(self): + """ + The library path property of the revit model object. + Describes the location of the revit families library on a file server. This location is used to determine the match status of the families. 
+ """ print("accessing library path getter") return self._revit_model.settings.library_path @LibraryPath.setter def LibraryPath(self, value): - print("accessing library path setter: [{}]".format(value)) + """ + The setter for the library path property of the revit model object. + + This setter is used to update the library path property of the revit model object through a two binding to the view and to update the match status of the families based on the new library path when it changes. + """ + + # type checking if not(isinstance (value,str)): raise ValueError("Value must be of type str, got {} instead.".format(type(value))) + + # set the new library path value self._revit_model.settings.library_path = value - print("settings: {}".format(self._revit_model.settings.to_json())) + #print("settings: {}".format(self._revit_model.settings.to_json())) # update the match status of all families self.update_families() - # raise the change event in order for the reload buttons availbiltiy check to be triggerd + # raise the change event in order for the reload buttons availability check to be triggered self.RaisePropertyChanged("LibraryPath") @property def Families(self): - print("accessing families") + """ + The collection of families to be displayed in the view. ( Not used in the view, the collection view is used instead) + """ return self._families @property def ReloadFamiliesCommand(self): - print("in Reload command") + """ + The command used to when the reload button in the view is clicked. + + Stores the selected family objects in the revit model object and triggers the close of the window. + """ return self._reload_families_command def find_files(self, file_paths, file_name): + """ + Finds all files in the list of file paths that have the same file name as the file name passed in as an argument. + Used to determine the match status of the families. ( is there no match, a single match or multiple matches) + + Args: + file_paths (list): A list of file paths to search in. 
+ file_name (str): The file name to search for. + + Returns: + list: A list of file paths that have the same file name as the file name passed in as an argument. + """ return [path for path in file_paths if os.path.basename(path) == file_name] def update_families(self): + """ + Updates the families collection with the families from the revit model object and sets the match status of the families based on the library path. + + Also sets up the collection view for the families collection. + Set up includes grouping the families by match status. + + """ # clear the collection self._families.Clear() + # set up collection view based on the observable collection of families + # this is what the xaml view is binding to + # this is required to be able to sort, group and filter the collection view without affecting the observable collection + self._families_view = CollectionViewSource.GetDefaultView(self._families) + + # group the families by match status (this will mean that the families will be sorted by match status first and than by any other sort criteria) + # MatchStatus is a property of the FamilyViewModel + self._families_view.GroupDescriptions.Add(PropertyGroupDescription("MatchStatus")) + + # set up a filter for the collection view + self._families_view.Filter = self.filter_families + + # get all families in the library path ( required to set the match status of the families) families_in_directory = [] if(self._revit_model.settings.library_path): families_in_directory = get_files_from_directory_walker_with_filters_simple( @@ -117,17 +193,18 @@ def update_families(self): ) if(families_in_directory): - print("found {} revit families".format(len(families_in_directory))) + print("found {} Revit families".format(len(families_in_directory))) else: print("Found no families in directory") families_in_directory = [] - # update the collection with values from the revit model + # update the collection with families from the revit model object for family in 
self._revit_model.get_all_families(): family_view_model = FamilyViewModel(family=family) # check the match status!! files_matching = self.find_files(families_in_directory, family_view_model.FamilyName+".rfa") + # set the match status based on the number of files found if len(files_matching) == 1: family_view_model.MatchStatus = MatchStatusNames.MATCH_OK.value family_view_model.FamilyFilePath = files_matching[0] @@ -138,8 +215,122 @@ def update_families(self): family_view_model.MatchStatus = MatchStatusNames.MULTIPLE_MATCHES.value family_view_model.FamilyFilePath = None + # add the family to the observable collection self._families.Add(family_view_model) + + def set_unique_values(self): + """ + Sets the unique values for the column filters context menu depending on the values in the families collection. + """ + # Use sets to collect unique values for column filters + unique_match_statuses_set = set() + unique_categories_set = set() + unique_names_set = set() + unique_shared_statuses_set = set() + + # extract unique values for column filters ( name and category only) + for family in self._families: + unique_categories_set.add(family.FamilyCategory) + unique_names_set.add(family.FamilyName) + unique_shared_statuses_set.add(family.FamilyIsShared) + + # set unique values for match status + unique_match_statuses_set.add(MatchStatusNames.NO_MATCH.value) + unique_match_statuses_set.add(MatchStatusNames.MULTIPLE_MATCHES.value) + unique_match_statuses_set.add(MatchStatusNames.MATCH_OK.value) + + # clear collection first before adding new values + self._unique_match_statuses.Clear() + self._unique_categories.Clear() + self._unique_names.Clear() + self._unique_shared_statuses.Clear() + + # add unique values to the filter lists + for match_status in unique_match_statuses_set: + self._unique_match_statuses.Add(FilterItem(value=match_status, refresh_view_method=self.refresh_view)) + for category in unique_categories_set: + self._unique_categories.Add(FilterItem(value=category, 
refresh_view_method=self.refresh_view)) + for name in unique_names_set: + self._unique_names.Add(FilterItem(value=name, refresh_view_method=self.refresh_view)) + for shared_status in unique_shared_statuses_set: + self._unique_shared_statuses.Add(FilterItem(value=shared_status, refresh_view_method=self.refresh_view)) + + + def sort_families(self, sort_by): + """ + Sorts the families collection view based on the column header that was clicked. + + Args: + sort_by (str): The property name of the FamilyViewModel to sort by. + """ + + # Check if the column is already sorted and if so, reverse the sort direction + current_sort = None + if self._families_view.SortDescriptions.Count > 0: + current_sort = self._families_view.SortDescriptions[0] + direction = ListSortDirection.Ascending + if current_sort and current_sort.PropertyName == sort_by: + if current_sort.Direction == ListSortDirection.Ascending: + direction = ListSortDirection.Descending + else: + direction = ListSortDirection.Ascending + + self._families_view.SortDescriptions.Clear() + self._families_view.SortDescriptions.Add(SortDescription(sort_by, direction)) + + # refreshes the view in the UI + self._families_view.Refresh() + + # Update current sort column and direction + self._current_sort_column = sort_by + self._current_sort_direction = direction + + def filter_families(self, obj): + if(isinstance(obj, FamilyViewModel)): + is_match = True + # check if the family matches the filter criteria for each column! 
+ # check match status + if self._unique_match_statuses: + is_match = is_match and any(filter_item.IsChecked for filter_item in self._unique_match_statuses if filter_item.Value == obj.MatchStatus) + # stop checking if the family does not match the filter criteria + if not is_match: + return False + # check category + if self._unique_categories: + is_match = is_match and any(filter_item.IsChecked for filter_item in self._unique_categories if filter_item.Value == obj.FamilyCategory) + # stop checking if the family does not match the filter criteria + if not is_match: + return False + # check name + if self._unique_names: + is_match = is_match and any(filter_item.IsChecked for filter_item in self._unique_names if filter_item.Value == obj.FamilyName) + # stop checking if the family does not match the filter criteria + if not is_match: + return False + # check shared status + if self._unique_shared_statuses: + is_match = is_match and any(filter_item.IsChecked for filter_item in self._unique_shared_statuses if filter_item.Value == obj.FamilyIsShared) + # stop checking if the family does not match the filter criteria + if not is_match: + return False + + # if the family matches all filter criteria, return True + return True + else: + return False + + def refresh_view(self): + """ + Refreshes the view by updating the families collection and the unique values for the column filters. + """ + # refreshes the view in the UI + self._families_view.Refresh() + def close_window(self, window): + """ + Closes the window that is passed in as an argument. 
+ """ + if window: window.Close() \ No newline at end of file diff --git a/Samples/pyRevit/families_reload/Views/FamiliesSelectionView.xaml b/Samples/pyRevit/families_reload/Views/FamiliesSelectionView.xaml index 60fc4fc6..e3bab4b7 100644 --- a/Samples/pyRevit/families_reload/Views/FamiliesSelectionView.xaml +++ b/Samples/pyRevit/families_reload/Views/FamiliesSelectionView.xaml @@ -21,6 +21,32 @@ + + + + @@ -29,7 +55,34 @@ - + + + + + @@ -39,6 +92,27 @@ + + + + @@ -48,6 +122,27 @@ + + + + diff --git a/Samples/pyRevit/families_reload/reloader.py b/Samples/pyRevit/families_reload/reloader.py index 9fb3f929..292fca6d 100644 --- a/Samples/pyRevit/families_reload/reloader.py +++ b/Samples/pyRevit/families_reload/reloader.py @@ -160,7 +160,9 @@ def reloaded_families_entry(doc, output, forms, families = None): if __name__ == "__main__": dummy_families = [ + RevitFamily(id=FamilyID(1231), family_name="abc_1", family_category="Casework",is_shared=False,match_status="None"), RevitFamily(id=FamilyID(1234), family_name="test_1", family_category="Furniture",is_shared=False,match_status="None"), - RevitFamily(id=FamilyID(5678), family_name="test_2", family_category="Casework",is_shared=True,match_status="None") + RevitFamily(id=FamilyID(5678), family_name="test_2", family_category="Casework",is_shared=True,match_status="OK"), + RevitFamily(id=FamilyID(5679), family_name="test_3", family_category="Casework",is_shared=True,match_status="OK") ] test_result = reloaded_families_entry(doc=None, output=None, forms=None, families = dummy_families) \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index f64e44d9..bcbbcbc9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -7,7 +7,7 @@ package_dir = [metadata] name = duhast -version = 1.1.9 +version = 1.1.10 author = Jan Christel author_email = jan.r.christel@gmail.com description = Revit API sample snippets and Revit Batch Processor flows. 
diff --git a/src/duHast/Revit/Common/Geometry/curve.py b/src/duHast/Revit/Common/Geometry/curve.py index d7ef9eb3..8e592b60 100644 --- a/src/duHast/Revit/Common/Geometry/curve.py +++ b/src/duHast/Revit/Common/Geometry/curve.py @@ -508,4 +508,48 @@ def get_curve_level(doc, curve): if(level_id==ElementId.InvalidElementId): return None level = doc.GetElement(level_id) - return level \ No newline at end of file + return level + + +def are_lines_parallel(line_one, line_two): + """ + Check if two lines are parallel. + + Args: + line_one (object): The first line to compare. + line_two (object): The second line to compare. + + Returns: + bool: True if the lines are parallel, False otherwise. + """ + # Get normalized directions for both lines + dir_one = line_one.Direction.Normalize() + dir_two = line_two.Direction.Normalize() + + # Check if the directions are the same or opposite + return dir_one.IsAlmostEqualTo(dir_two) or dir_one.IsAlmostEqualTo(dir_two.Negate()) + +def are_lines_perpendicular(line_one, line_two): + """ + Check if two lines are perpendicular. + + Args: + line_one (object): The first line to compare. + line_two (object): The second line to compare. + + Returns: + bool: True if the lines are perpendicular, False otherwise. 
+ """ + + # check if the lines are perpendicular: + # calculate a perpendicular vector to the first line + # and check if it is parallel to the second line + # thats done rather than using the dot product of the two lines + # to stay within the means of Revit API calculation precision and avoid floating point errors + perpendicular_vector_to_one = XYZ(-line_one.Direction.Y, line_one.Direction.X, 0) + + # normalize the direction of the second line + dir_two = line_two.Direction.Normalize() + + # check if the perpendicular vector is parallel to the second line + return perpendicular_vector_to_one.IsAlmostEqualTo(dir_two) or perpendicular_vector_to_one.IsAlmostEqualTo(dir_two.Negate()) diff --git a/src/duHast/Revit/Family/Data/Objects/family_data_family.py b/src/duHast/Revit/Family/Data/Objects/family_data_family.py index fe1141d2..48f27cc0 100644 --- a/src/duHast/Revit/Family/Data/Objects/family_data_family.py +++ b/src/duHast/Revit/Family/Data/Objects/family_data_family.py @@ -174,10 +174,47 @@ def __eq__(self, other): other.family_category_nesting_path, ) + def compare_name_and_category(self, other): + """ + Compares family_name and family_category properties only + + :param other: Another FamilyDataFamily instance + :type other: :class:`.FamilyDataFamily` + :return: True if family_name and family_category are equal otherwise false + :rtype: bool + """ + return isinstance(other, FamilyDataFamily) and ( + self.family_name, + self.family_category, + ) == ( + other.family_name, + other.family_category, + ) + + # python 2.7 needs custom implementation of not equal def __ne__(self, other): return not self.__eq__(other=other) + + def __hash__(self): + """ + Hash the family data. + + Hash only considers the family_name, family_category, family_nesting_path and family_category_nesting_path properties. 
+ + :return: Hash of the family data + :rtype: int + """ + + return hash( + (self.family_name, + self.family_category, + self.family_nesting_path, + self.family_category_nesting_path, + ) + ) + def _build_nesting_by_name(self): """ Build the nesting name for the family data. diff --git a/src/duHast/Revit/Family/Data/family_base_data_missing_families.py b/src/duHast/Revit/Family/Data/family_base_data_missing_families.py index 62a67a41..d48e7c2d 100644 --- a/src/duHast/Revit/Family/Data/family_base_data_missing_families.py +++ b/src/duHast/Revit/Family/Data/family_base_data_missing_families.py @@ -37,7 +37,10 @@ from duHast.Utilities.Objects.timer import Timer from duHast.Utilities.Objects import result as res from duHast.Revit.Family.Data.family_data_family_processor_utils import process_data -from duHast.Revit.Family.Data.Objects.family_base_data_processor_defaults import NESTING_SEPARATOR +from duHast.Revit.Family.Data.Objects.family_base_data_processor_defaults import ( + NESTING_SEPARATOR, +) + def get_unique_nested_families_from_path_data(path_data): """ @@ -57,11 +60,11 @@ def get_unique_nested_families_from_path_data(path_data): for entry in path_data: family_name_nesting = entry[0] category_name_nesting = entry[1] - + # split into chunks at separator families = family_name_nesting.split(NESTING_SEPARATOR) categories = category_name_nesting.split(NESTING_SEPARATOR) - + if len(families) != len(categories): raise ValueError( "Name path length: {} is different to category path length: {}".format( @@ -80,13 +83,13 @@ def get_unique_nested_families_from_path_data(path_data): def get_unique_root_families_from_family_data(family_data): """ Function to retrieve unique root families from a list of family data objects. 
- + :param family_data: list of family data objects :type family_data: list[:class:`.FamilyDataFamily`] :return: list of tuples containing family name and category :rtype: list[(family_name, family_category)] """ - + # family data is a list of family_data_family instances # will be a list of tuples 0: family name, 1 family category @@ -103,7 +106,7 @@ def get_unique_root_families_from_family_data(family_data): def get_missing_families(root_families, nested_families): """ Function to find missing families from a list of root families and nested families. - + :param root_families: list of tuples representing root family name and category :type root_families: list[(family_name, family_category)] :param nested_families: list of tuples representing nested family name and category @@ -136,7 +139,9 @@ def process_families(family_data, result_list): for family in family_data: # process each family family.process() - longest_path = family.get_longest_unique_nesting_path() # returns a list of tuples (family root name path, family root category path) + longest_path = ( + family.get_longest_unique_nesting_path() + ) # returns a list of tuples (family root name path, family root category path) if longest_path is not None: for lp in longest_path: result_list.append((family, lp)) @@ -148,9 +153,9 @@ def _find_missing_families(families, families_longest_path): Returns a list of tuples representing nested family name and category which does not have a matching root family. 
:param families: List of family instances - :type families: [] + :type families: [:class:`.FamilyDataFamily`] :param families_longest_path: list of tuples representing longest unique name nesting path and matching longest unique category nesting path - :type families_longest_path: [] + :type families_longest_path: [(family name, family category)] :return: List of tuples representing the name and category of a family missing (from the library and therefore not presented as root family) :rtype: [(family name, family category)] """ @@ -213,25 +218,27 @@ def check_families_missing_from_library(family_base_data_report_file_path): family_base_data_report_file_path=family_base_data_report_file_path, do_this=process_families, ) - + # check if processing was successful, otherwise get out if families_processed.status == False: raise ValueError(families_processed.message) - + # get results - families = [] # list of family instances - families_longest_path = [] # list of tuples representing longest unique name nesting path and matching longest unique category nesting path + families = [] # list of family instances + families_longest_path = ( + [] + ) # list of tuples representing longest unique name nesting path and matching longest unique category nesting path for nested_tuple in families_processed.result: # per nested path there might be multiple entries of the same family families.append(nested_tuple[0]) families_longest_path.append(nested_tuple[1]) - + return_value.append_message( "{} Found: {} unique longest path in families.".format( t_process.stop(), len(families_longest_path) ) ) - + # start timer again t_process.start() @@ -242,7 +249,8 @@ def check_families_missing_from_library(family_base_data_report_file_path): return_value.append_message( "Found {} missing families. 
{}".format( - len(missing_families), t_process.stop()) + len(missing_families), t_process.stop() + ) ) if len(missing_families) > 0: return_value.result = missing_families @@ -260,10 +268,20 @@ def check_families_missing_from_library(family_base_data_report_file_path): def get_direct_root_families(families, missing_families): + """ + Returns a list of FamilyDataFamily instances which represent the direct parents (host families) of the missing families. + + :param families: List of family instances + :type families: [:class:`.FamilyDataFamily`] + :param missing_families: List of tuples representing the name and category of a family missing (from the library and therefore not presented as root family) + :type missing_families: [(family name, family category)] + :return: List of family instances which represent the direct parents (host families) of the missing families + :rtype: [:class:`.FamilyDataFamily`] + """ # return value direct_host_families = [] - + direct_host_families_short = [] # loop over families and check for match at nesting level 01 for family in families: # families at nesting level 1 @@ -274,9 +292,16 @@ def get_direct_root_families(families, missing_families): family_at_level_one.family_name, family_at_level_one.family_category, ) - if test_value in missing_families: - # match found... 
- direct_host_families.append(family_at_level_one) + if ( + test_value in missing_families + and "{} {}".format(family.family_name, family.family_category) + not in direct_host_families_short + ): + # match found...store the direct host family + direct_host_families.append(family) + direct_host_families_short.append( + "{} {}".format(family.family_name, family.family_category) + ) return direct_host_families @@ -284,6 +309,7 @@ def get_direct_root_families(families, missing_families): def find_missing_families_direct_host_families(family_base_data_report_file_path): """ Returns a list of FamilyDataFamily instances which represent the direct parents (host families) of the missing families. + Only processed root families which have a missing family as a direct nested family are returned. (any root families which contains a missing family nested further down the nesting tree are not returned) :param family_base_data_report_file_path: Fully qualified file path to family base data report file. :type family_base_data_report_file_path: str @@ -315,9 +341,6 @@ def find_missing_families_direct_host_families(family_base_data_report_file_path try: - # start timer again - t_process.start() - # load and process families families_processed_result = process_data( family_base_data_report_file_path=family_base_data_report_file_path, @@ -351,22 +374,45 @@ def find_missing_families_direct_host_families(family_base_data_report_file_path families=families, families_longest_path=families_longest_path ) + # check if there are any missing families + if len(missing_families) == 0: + return_value.append_message( + "No missing families found in data set. 
{}".format(t_process.stop()) + ) + return return_value + # get the direct root families of nested families identified as missing - direct_root_families = [] - if len(missing_families) > 0: - # loop over longest path and find the ones where the second entry in the nesting path is a missing family - direct_root_families = get_direct_root_families( - families=families, - missing_families=missing_families, + # logging + return_value.append_message( + "Found {} missing families. {}".format( + len(missing_families), t_process.stop() ) + ) + for mf in missing_families: + return_value.append_message("Missing family: {} {}".format(mf[0], mf[1])) + # start timer again + t_process.start() + + # set up a list for direct root families + direct_root_families = [] + + # loop over longest path and find the ones where the second entry in the nesting path is a missing family + direct_root_families = get_direct_root_families( + families=families, + missing_families=missing_families, + ) + + # logging + return_value.append_message( + "Found {} direct hosts to missing families. {}".format( + len(direct_root_families), t_process.stop() + ) + ) + for drf in direct_root_families: return_value.append_message( - "Found {} direct hosts to missing families. 
{}".format( - len(direct_root_families), t_process.stop() - ) + "Direct host family: {} {}".format(drf.family_name, drf.family_category) ) - else: - return_value.append_message("No missing root families found in data set.") # update result property as required if len(direct_root_families) > 0: diff --git a/src/duHast/Revit/Family/family_types_get_data_from_xml.py b/src/duHast/Revit/Family/family_types_get_data_from_xml.py index cd33a013..50e7093e 100644 --- a/src/duHast/Revit/Family/family_types_get_data_from_xml.py +++ b/src/duHast/Revit/Family/family_types_get_data_from_xml.py @@ -51,11 +51,12 @@ FamilyTypeDataStorage, ) from duHast.Utilities.Objects.result import Result +from duHast.Utilities.files_io import get_file_name_without_ext, get_directory_path_from_file_path -def write_data_to_xml_file_and_read_it_back(an_action_to_write_xml_data): +def write_data_to_temp_xml_file_and_read_it_back(an_action_to_write_xml_data): """ - Write the data to an XML file and read it back. + Write the data to a temp XML file and read it back. :param an_action_to_write_xml_data: The action to write the XML data. :type an_action_to_write_xml_data: function @@ -90,6 +91,37 @@ def write_data_to_xml_file_and_read_it_back(an_action_to_write_xml_data): return doc_xml +def write_data_to_xml_file_and_read_it_back(an_action_to_write_xml_data, xml_file_path): + """ + Write the data to an XML file and read it back. + + :param an_action_to_write_xml_data: The action to write the XML data. + :type an_action_to_write_xml_data: function + :param xml_file_path: The path of the XML file. + :type xml_file_path: str + + :return: The data read back from the XML file. + :rtype: XmlDocument or None if an error occurred. 
+ """ + + doc_xml = None + + try: + + # Write the data to the file + an_action_to_write_xml_data(xml_file_path) + + # Read the data back from the file + with open(xml_file_path, "r") as file: + xml_content = file.read() + + # Load the XML content + doc_xml = XmlDocument() + doc_xml.LoadXml(xml_content) + except Exception as e: + return None + return doc_xml + def read_xml_into_storage(doc_xml, family_name, family_path): """ @@ -185,23 +217,19 @@ def read_xml_into_storage(doc_xml, family_name, family_path): return type_data -def get_type_data_via_XML_from_family_file(application, family_name, family_path): +def get_type_data_via_XML_from_family_file(application, family_name, family_path, use_temporary_file=True): """ Get the family type data from the family document using the XML extraction method. This can be used to extract the type data from a family document within a Revit session but without opening the family in Revit. :param application: The Revit application object. :type application: Autodesk.Revit.ApplicationServices.Application - :param path: The path of the family file. - :type path: str :param family_name: The name of the family. :type family_name: str :param family_path: The path of the family file. :type family_path: str - :param root_path: The root path of the family. (nesting tree of host family names) - :type root_path: str - :param root_category_path: The root category path of the family. (nesting tree of host family category names) - :type root_category_path: str + :param use_temporary_file: Whether to use a temporary file for the XML data. + :type use_temporary_file: bool :return: A result object with .result containing a list of family type data objects. (or empty if failed) :rtype: Result @@ -219,8 +247,17 @@ def action(temp_path_xml): # this is a method of the application object and does not require the family to be open... 
application.ExtractPartAtomFromFamilyFile(family_path, temp_path_xml) - # Write the data to an XML file and read it back - doc_xml = write_data_to_xml_file_and_read_it_back(action) + doc_xml = None + + if use_temporary_file: + # Write the data to an XML file and read it back + doc_xml = write_data_to_temp_xml_file_and_read_it_back(action) + else: + dir_out = get_directory_path_from_file_path(family_path) + family_name = get_file_name_without_ext(family_path) + + # Write the data to an XML file and read it back + doc_xml = write_data_to_xml_file_and_read_it_back(action, os.path.join(dir_out,family_name + ".xml")) # check if an xml document was created if doc_xml is None: @@ -260,7 +297,7 @@ def action(temp_path_xml): revit_family.ExtractPartAtom(temp_path_xml) # Write the data to an XML file and read it back - doc_xml = write_data_to_xml_file_and_read_it_back(action) + doc_xml = write_data_to_temp_xml_file_and_read_it_back(action) # check if an xml document was created if doc_xml is None: diff --git a/src/duHast/Revit/Rooms/Objects/RoomSpatialForViews.py b/src/duHast/Revit/Rooms/Objects/RoomSpatialForViews.py new file mode 100644 index 00000000..bbb7f6cf --- /dev/null +++ b/src/duHast/Revit/Rooms/Objects/RoomSpatialForViews.py @@ -0,0 +1,157 @@ +""" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Revit API utility functions for the spatial properties of room elements. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +""" + +#!/usr/bin/python +# -*- coding: utf-8 -*- +# License: +# +# +# Revit Batch Processor Sample Code +# +# BSD License +# Copyright 2024, Jan Christel +# All rights reserved. 
+ +# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +# - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +# - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +# +# This software is provided by the copyright holder "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. +# In no event shall the copyright holder be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; +# or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. 
+# +# + +from duHast.Revit.Rooms.Objects.RoomBaseObject import RoomBaseObj +from duHast.Revit.Rooms.Objects.RoomSpatialObject import RoomSpatialObj +from Autodesk.Revit.DB import ( + Line, + SpatialElementBoundaryLocation, + XYZ, +) + +from duHast.Utilities.unit_conversion import convert_imperial_feet_to_metric_mm +from duHast.Revit.Common.Geometry.curve import are_lines_parallel, are_lines_perpendicular + +class RoomSpatialForView(RoomBaseObj): + def __init__( + self, rvt_doc, room, boundary_location=SpatialElementBoundaryLocation.Finish + ): + # initialize the base class + super(RoomSpatialForView, self).__init__(rvt_doc, room, boundary_location=boundary_location) + + # Use the helper method to calculate spatial data + (self.segments, self.room_walls, self.wall_segs, + self.bbox, self.bbox_centre) = self._calculate_spatial_data(rvt_doc, room, boundary_location) + + + @staticmethod + def _calculate_spatial_data(rvt_doc, room, boundary_location): + """ + Helper method to compute spatial data for a room. + + This is done because I cant directly inherit from RoomSpatialObj due to exception: + IronPython.Runtime.Exceptions.TypeErrorException: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases + which is caused by .net metaclass conflict. + + """ + + # Create a dummy room spatial object to get some spatial data + dummy_room = RoomSpatialObj(rvt_doc, room, boundary_location=boundary_location) + + return dummy_room.segments, dummy_room.room_walls, dummy_room.wall_segs, dummy_room.bbox, dummy_room.bbox_centre + + + def is_room_rectalinear(self): + """ + Check if the room is rectalinear. ( all room bounding segements are either parallel or perpendicular to each other ) + + :return: True if the room is rectalinear, False otherwise. 
+ :rtype: bool + """ + + # IList> segments = room.GetBoundarySegments(new SpatialElementBoundaryOptions()); + # get all room bounding segments ( this is a list of list of BoundarySegment objects ) + room_bounding_segemnts = self.segments + + # proceed only if room is bound: + if room_bounding_segemnts is None: + return False + + + # check if all room bounding segments are either parallel or perpendicular to each other + # if so, return True else return False + + # loop over outer loop segments only ( representing the outer room boundary and not any inner islands ) and check if parallel or perpendicular to the previous segment + room_bounding_segemnts_outer_loop = room_bounding_segemnts[0] + #print ("Room bounding segments loops: ", len(room_bounding_segemnts)) + #print("Room bounding segments outer loop: ", len(room_bounding_segemnts_outer_loop)) + for i in range(1, len(room_bounding_segemnts_outer_loop)): + # get the current segment + current_segment_curve = room_bounding_segemnts_outer_loop[i].GetCurve() + #print("...Current segment curve: ", current_segment_curve) + #print("...Current segment type: ", type(room_bounding_segemnts_outer_loop[i])) + # get the previous segment + previous_segment_curve = room_bounding_segemnts_outer_loop[i-1].GetCurve() + #print("...Previous segment curve: ",previous_segment_curve) + #print("...Previous segment type: ", type(room_bounding_segemnts_outer_loop[i-1])) + + # make sure both curves are lines ( no arcs or other curves ) + if (isinstance(current_segment_curve, Line) and isinstance(previous_segment_curve, Line)): + # check if the current segment is parallel or perpendicular to the previous segment + if(not(are_lines_parallel(current_segment_curve, previous_segment_curve) or are_lines_perpendicular(current_segment_curve, previous_segment_curve))): + # if not, the room is not rectalinear + #print("......Current segment is parallel {} \n{}{}" .format(are_lines_parallel(current_segment_curve, previous_segment_curve), 
current_segment_curve.Direction.Normalize(), previous_segment_curve.Direction.Normalize())) + #print("......Current segment is perpendicular {} \n{}" .format(are_lines_perpendicular(current_segment_curve, previous_segment_curve), current_segment_curve.Direction.Normalize().DotProduct(previous_segment_curve.Direction.Normalize()))) + + print("...length of current segment: ", convert_imperial_feet_to_metric_mm(current_segment_curve.Length)) + + return False + else: + # only lines are supported + #print("Only lines are supported for checking if room is rectalinear") + return False + + # if all segments are either parallel or perpendicular to each other, the room is rectalinear + return True + + + def is_room_aligned_to_its_bounding_box(self): + """ + Check if the room is aligned to the bounding box. ( all room bounding segements are either parallel or perpendicular to the bounding box ) + + :return: True if the room is aligned to the bounding box, False otherwise. + :rtype: bool + """ + + # check if room is bound: + if self.bbox == None: + return False + + # check if room is rectalinear + if not(self.is_room_rectalinear()): + # check if any random room bounding segments is parallel or perpendicular to the bounding box + return False + + + # check if any arbritary room bounding segment is parallel or perpendicular to any bounding box edge + if(self.segments is not None and len(self.segments) > 0): + # get the first room bounding segment + room_bounding_segemnts_outer_loop = self.segments[0] + first_segment_curve = room_bounding_segemnts_outer_loop[0].GetCurve() + # check if the first segment is parallel or perpendicular to any bounding box edge + # get a bounding box edge from min XYZ and max XYZ + # by using Line.CreateBound + bbox_edge = Line.CreateBound(self.bbox.Min, XYZ(self.bbox.Max.X, self.bbox.Min.Y, self.bbox.Min.Z)) + # check if the first segment is parallel or perpendicular to the bounding box edge + if(are_lines_parallel(first_segment_curve, bbox_edge) or 
are_lines_perpendicular(first_segment_curve, bbox_edge)): + return True + else: + return False + else: + return False diff --git a/src/duHast/Revit/Rooms/Objects/RoomSpatialObject.py b/src/duHast/Revit/Rooms/Objects/RoomSpatialObject.py index 0ab7cab3..555b71be 100644 --- a/src/duHast/Revit/Rooms/Objects/RoomSpatialObject.py +++ b/src/duHast/Revit/Rooms/Objects/RoomSpatialObject.py @@ -45,7 +45,8 @@ def __init__( self, rvt_doc, room, boundary_location=SpatialElementBoundaryLocation.Finish ): super(RoomSpatialObj, self).__init__(rvt_doc, room) - RoomBaseObj.__init__(self, rvt_doc, room) + #RoomBaseObj.__init__(self, rvt_doc, room) # this is the same as above super call + spat_opts = SpatialElementBoundaryOptions() spat_opts.SpatialElementBoundaryLocation = boundary_location self.segments = get_room_segments(room, spat_opts) diff --git a/src/duHast/UI/Objects/WPF/ViewModels/FilterItem.py b/src/duHast/UI/Objects/WPF/ViewModels/FilterItem.py new file mode 100644 index 00000000..d708b86e --- /dev/null +++ b/src/duHast/UI/Objects/WPF/ViewModels/FilterItem.py @@ -0,0 +1,104 @@ +""" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +A class to provide filtering of views of observable collections.. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +IsChecked property should be bound similar to this sample code: + + + + +""" + +# +# License: +# +# +# Revit Batch Processor Sample Code +# +# BSD License +# Copyright 2024, Jan Christel +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +# - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
+# - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +# - Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +# +# This software is provided by the copyright holder "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. +# In no event shall the copyright holder be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; +# or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. +# +# +# + +from duHast.UI.Objects.WPF.ViewModels.ViewModelBase import ViewModelBase + +class FilterItem(ViewModelBase): + def __init__(self, value, refresh_view_method): + """ + A filter item for a view model view. + + Is used in column context menu filters to filter the view based on the value of the item. + + :param value: the value of the item + :param refresh_view_method: the method to call to refresh the view + """ + + super(FilterItem, self).__init__() + self._value = value + self.refresh_view_method = refresh_view_method + + # default to checked + self._is_checked = True + + @property + def Value(self): + """ + Gets the value of the item displayed in the context menu. + """ + + return self._value + + @property + def IsChecked(self): + """ + Gets the is checked value of the item. 
+ """ + + return self._is_checked + + @IsChecked.setter + def IsChecked(self, value): + """ + Sets the is checked value of the item. + """ + + if self._is_checked != value: + self._is_checked = value + self.RaisePropertyChanged("IsChecked") + + # refresh the view to show the changes + if self.refresh_view_method: + self.refresh_view_method() + + + diff --git a/src/duHast/Utilities/files_combine.py b/src/duHast/Utilities/files_combine.py index e26b6d69..5f5a1017 100644 --- a/src/duHast/Utilities/files_combine.py +++ b/src/duHast/Utilities/files_combine.py @@ -26,12 +26,12 @@ # # - import codecs import glob import os -from csv import QUOTE_MINIMAL +import csv +from duHast.Utilities.Objects.result import Result from duHast.Utilities.files_io import get_file_name_without_ext from duHast.Utilities.files_get import get_files_single_directory from duHast.Utilities.files_tab import get_unique_headers as get_unique_headers_tab @@ -46,8 +46,10 @@ def combine_files( file_prefix="", file_suffix="", file_extension=".txt", - out_put_file_name="result.txt", + output_file_name="result.txt", file_getter=get_files_single_directory, + delimiter=",", + quoting=csv.QUOTE_MINIMAL ): """ Combines multiple text files into a single new file. @@ -56,6 +58,7 @@ def combine_files( - files have a header row followed by data rows - same number of headers (columns) in each files. - files have the same header names per column + - files are encoded in UTF-8! The new file will be saved into the same folder as the original files. 
@@ -67,123 +70,95 @@ def combine_files( :type file_suffix: str :param file_extension: Filter: File needs to have this file extension :type file_extension: str, format '.extension' - :param out_put_file_name: The file name of the combined file, defaults to 'result.txt' - :type out_put_file_name: str, optional - :param file_getter: Function returning list of files to be combined, defaults to GetFilesSingleFolder - :type file_getter: func(folder_path, file_prefix, file_suffix, file_extension), optional - """ - - file_list = file_getter(folder_path, file_prefix, file_suffix, file_extension) - - errors = [] - - with open(os.path.join(folder_path, out_put_file_name), "w") as result: - - file_counter = 0 - for file_ in file_list: - try: - line_counter = 0 - fp = codecs.open(file_, "r", encoding="utf-8") - # fp = open(file_, "r") - lines = fp.readlines() - fp.close() - for line in lines: - # ensure header from first file is copied over - if file_counter == 0 and line_counter == 0 or line_counter != 0: - result.write(line) - line_counter += 1 - - file_counter += 1 - except Exception as e: - errors.append( - "File: {} failed to combine with exception: {}".format(file_, e) - ) - - # raise any errors - if len(errors) > 0: - raise ValueError("\n".join(errors)) - - -def combine_files_basic( - folder_path, - file_prefix="", - file_suffix="", - file_extension=".txt", - out_put_file_name="result.txt", - file_getter=get_files_single_directory, -): - """ - Combines multiple text files into a single new file. - Assumes: - - - files are text files - - The new file will be saved into the same folder as the original files. - - :param folder_path: Folder path from which to get files to be combined and to which the combined file will be saved. - :type folder_path: str - :param file_prefix: Filter: File name starts with this value - :type file_prefix: str - :param file_suffix: Filter: File name ends with this value. 
-    :type file_suffix: str
-    :param file_extension: Filter: File needs to have this file extension
-    :type file_extension: str, format '.extension'
-    :param out_put_file_name: The file name of the combined file, defaults to 'result.txt'
-    :type out_put_file_name: str, optional
+    :param output_file_name: The file name of the combined file, defaults to 'result.txt'
+    :type output_file_name: str, optional
     :param file_getter: Function returning list of files to be combined, defaults to GetFilesSingleFolder
     :type file_getter: func(folder_path, file_prefix, file_suffix, file_extension), optional
+    :param delimiter: The delimiter used in the files (e.g., ',' for CSV, '\t' for tab-separated), defaults to ','
+    :type delimiter: str, optional
+    :param quoting: The quoting option for the CSV writer, defaults to csv.QUOTE_MINIMAL
+    :type quoting: int, optional
     """

-    file_list = file_getter(folder_path, file_prefix, file_suffix, file_extension)
-    with open(os.path.join(folder_path, out_put_file_name), "w") as f:
-        for file_ in file_list:
-            fp = open(file_, "r")
-            lines = fp.readlines()
-            fp.close()
-            for line in lines:
-                f.write(line)
-    f.close()
-
-
-def append_to_file(source_file, append_file, ignore_first_row=False):
+    return_value = Result()
+    try:
+        # check a file getter function was provided
+        if(file_getter is None):
+            return_value.update_sep(False, "No file getter function provided.")
+            return return_value
+        # get files to combine using file getter function
+        file_list = file_getter(folder_path, file_prefix, file_suffix, file_extension)
+
+        # loop over files and combine...
+ with open(os.path.join(folder_path, output_file_name), "w", newline='', encoding="utf-8") as result: + writer = csv.writer(result, delimiter=delimiter, quoting=quoting) + + file_counter = 0 + for file_ in file_list: + try: + line_counter = 0 + with codecs.open(file_, "r", encoding="utf-8") as fp: + reader = csv.reader(fp, delimiter=delimiter) + for line in reader: + # ensure header from first file is copied over + if file_counter == 0 and line_counter == 0 or line_counter != 0: + writer.writerow(line) + line_counter += 1 + + file_counter += 1 + return_value.append_message("File: {} combined.".format(file_)) + except Exception as e: + return_value.update_sep(False, "File: {} failed to combine with exception: {}".format(file_, e)) + + except Exception as e: + return_value.update_sep(False, "Failed to combine files with exception: {}".format(e)) + return return_value + +def append_to_file(source_file, append_file, ignore_first_row=False, delimiter=",", quoting=csv.QUOTE_MINIMAL): """ - Appends one text file to another. Assumes same number of headers (columns) in both files. + Appends one text file to another. + + Assumes: + + - same number of headers (columns) in both files. + - files are encoded in UTF-8! :param source_file: The fully qualified file path of the file to which the other file will be appended. :type source_file: str :param append_file: The fully qualified file path of the file to be appended. :type append_file: str - :param ignore_first_row: If True, first row of append file will not be appended to source file. 
+    :param ignore_first_row: If True, first row of append file will not be appended to source file. (Assumed it's a header row.)
     :type ignore_first_row: bool
+    :param delimiter: The delimiter used in the files (e.g., ',' for CSV, '\t' for tab-separated), defaults to ','
+    :type delimiter: str, optional
+    :param quoting: The quoting option for the CSV writer, defaults to csv.QUOTE_MINIMAL
+    :type quoting: int, optional

-    :return: If True file was appended without an exception, otherwise False.
-    :rtype: bool
+    :return: Result object; .status is True if the file was appended without an exception, otherwise False.
+    :rtype: :class:`.Result`
     """
-    flag = True
+    return_value = Result()
     try:
         # read file to append into memory...hopefully will never get in GB range in terms of file size
-        fp = codecs.open(append_file, "r", encoding="utf-8")
-        lines = fp.readlines()
-        fp.close()
-        with codecs.open(source_file, "a", encoding="utf-8") as f:
-            if ignore_first_row == False:
+        with open(append_file, "r", encoding="utf-8") as fp:
+            reader = csv.reader(fp, delimiter=delimiter)
+            lines = list(reader)
+
+        with open(source_file, "a", encoding="utf-8", newline='') as f:
+            writer = csv.writer(f, delimiter=delimiter, quoting=quoting)
+            if not ignore_first_row:
                 for line in lines:
-                    f.write(line)
+                    writer.writerow(line)
             else:
                 # check if only a header row in file?
                 if len(lines) > 1:
-                    for x in range(1, len(lines)):
-                        f.write(lines[x])
-                        # get out to avoid out of index exception
-                        # if I change the for loop to be
-                        # for x in range(1, len(lines)-1):
-                        # it will not process files with two rows only ( a header and a single data row!
) - if x == len(lines) - 1: - break + for line in lines[1:]: + writer.writerow(line) - except Exception: - flag = False - return flag + return_value.append_message("File: {} appended to file: {}".format(append_file, source_file)) + except Exception as e: + return_value.update_sep(False, "Failed to append file with exception: {}".format(e)) + return return_value def _format_headers(headers_in_file, file): @@ -214,7 +189,7 @@ def combine_files_header_independent( file_prefix="", file_suffix="", file_extension=".txt", - out_put_file_name="result.txt", + output_file_name="result.txt", overwrite_existing=False, ): """ @@ -242,7 +217,7 @@ def combine_files_header_independent( ) # build list of unique headers headers = get_unique_headers_tab(file_list) - combined_file_name = os.path.join(folder_path, out_put_file_name) + combined_file_name = os.path.join(folder_path, output_file_name) # loop over files to be combined file_counter = 0 for file in file_list: @@ -296,7 +271,7 @@ def combine_files_csv_header_independent( file_prefix="", file_suffix="", file_extension=".txt", - out_put_file_name="result.csv", + output_file_name="result.csv", overwrite_existing=False, ): """ @@ -313,8 +288,8 @@ def combine_files_csv_header_independent( :type file_suffix: str :param file_extension: Filter: File needs to have this file extension :type file_extension: str, format '.extension' - :param out_put_file_name: The file name of the combined file, defaults to 'result.csv' - :type out_put_file_name: str, optional + :param output_file_name: The file name of the combined file, defaults to 'result.csv' + :type output_file_name: str, optional :param overwrite_existing: Will overwrite an existing output file if set to True, defaults to False ( append to existing output file) :type overwrite_existing: bool, optional """ @@ -324,7 +299,7 @@ def combine_files_csv_header_independent( ) # build list of unique headers headers = get_unique_headers_csv(file_list) - combined_file_name = 
os.path.join(folder_path, out_put_file_name) + combined_file_name = os.path.join(folder_path, output_file_name) # loop over files and combine... file_counter = 0 @@ -378,7 +353,7 @@ def combine_files_csv_header_independent( enforce_ascii=False, encoding="utf-8", bom=None, - quoting=QUOTE_MINIMAL + quoting=csv.QUOTE_MINIMAL ) file_counter += 1 @@ -388,7 +363,7 @@ def combine_files_json( file_prefix="", file_suffix="", file_extension=".txt", - out_put_file_name="result.txt", + output_file_name="result.txt", file_getter=get_files_single_directory, ): """ @@ -425,7 +400,7 @@ def combine_files_json( # write json data out result_write = write_json_to_file( json_data=json_objects, - data_output_file_path=os.path.join(folder_path, out_put_file_name), + data_output_file_path=os.path.join(folder_path, output_file_name), ) # return flag only diff --git a/test/NUnitTests/PythonTests/Revit/Family/Data/FamilyFindMissingTests.cs b/test/NUnitTests/PythonTests/Revit/Family/Data/FamilyFindMissingTests.cs index af5548a1..1e2a12c8 100644 --- a/test/NUnitTests/PythonTests/Revit/Family/Data/FamilyFindMissingTests.cs +++ b/test/NUnitTests/PythonTests/Revit/Family/Data/FamilyFindMissingTests.cs @@ -8,6 +8,7 @@ public class FamilyFindMissingTests { private string dataTestDirectory; private List testFilesMultiple; + private List testFilesHostMultiple; [SetUp] public void SetUp() @@ -23,6 +24,13 @@ public void SetUp() new TestFileData("Section Tail - Upgrade", true, 1, new List> { new List { "Section Marks" } }), new TestFileData("Symbol_Outlet_GPO_Single_Emergency_ANN", true, 1, new List> { new List { "Generic Annotations" } }), }; + + testFilesHostMultiple = new List + { + new TestFileData("Sample_Family_Six", true, 1, new List> { new List { "Specialty Equipment" } }), + new TestFileData("Sample_Family_Nine", true, 1, new List> { new List { "Furniture Systems" } }), + new TestFileData("Sample_Family_Two", true, 1, new List> { new List { "Furniture Systems" } }), + }; } [Test] @@ -45,7 
+53,12 @@ public void TestCheckFamiliesMissingFromLibrary() Console.WriteLine(testResultMissing.message); Assert.That(testResultMissing.status, Is.True, "Expecting successfully reading of all files"); Assert.That(testResultMissing.result.Count, Is.EqualTo(testFilesMultiple.Count), "Expecting number of missing root families to match"); - + + foreach (var missingFam in testResultMissing.result) + { + Console.WriteLine(missingFam); + } + // Check if all expected data is present foreach (var testFile in testFilesMultiple) @@ -65,5 +78,38 @@ public void TestCheckFamiliesMissingFromLibrary() } } } + [Test] + public void TestFindMissingFamiliesDirectHostFamilies() + { + dynamic familyReportReader = PythonEngineManager.FamilyMissingFamiliesModule; + string dataTestDirectoryReader01 = Path.Combine(dataTestDirectory, @"ReadMissingFamilies_01\FamilyBaseDataCombinedReport_original.csv"); + Console.WriteLine(dataTestDirectoryReader01); + + var testResultDirectHostFamilies = familyReportReader.find_missing_families_direct_host_families(dataTestDirectoryReader01); + Console.WriteLine(testResultDirectHostFamilies.result); + Console.WriteLine(testResultDirectHostFamilies.message); + Assert.That(testResultDirectHostFamilies.status, Is.True, "Expecting successfully reading of all files"); + Assert.That(testResultDirectHostFamilies.result.Count, Is.EqualTo(testFilesHostMultiple.Count), "Expecting number of direct host families to match"); + + // Check if all expected data is present + foreach (var testFile in testFilesHostMultiple) + { + bool foundMatch = false; + foreach (var familyData in testResultDirectHostFamilies.result) + { + Console.WriteLine(familyData.family_name + " " + familyData.family_category + " >> " + testFile.FileName); + if (familyData.family_name == testFile.FileName && familyData.family_category == testFile.ExpectedData[0][0]) + { + foundMatch = true; + break; + } + } + if (!foundMatch) + { + Assert.Fail($"No match found for family: {testFile.FileName} with 
category: {testFile.ExpectedData[0][0]}"); + } + } + } + } } diff --git a/test/NUnitTests/PythonTests/UtilitiesTests/FileCombineTests.cs b/test/NUnitTests/PythonTests/UtilitiesTests/FileCombineTests.cs index 2de90902..72051fe4 100644 --- a/test/NUnitTests/PythonTests/UtilitiesTests/FileCombineTests.cs +++ b/test/NUnitTests/PythonTests/UtilitiesTests/FileCombineTests.cs @@ -41,10 +41,12 @@ public void CombineFiles_CreatesCombinedFile() File.WriteAllText(file2, "Header1,Header2\nValue3,Value4\n"); // Act - fileCombiner.combine_files(tempDirectory, "", "", ".txt", "combined.txt"); + var result = fileCombiner.combine_files(folder_path: tempDirectory); + Console.WriteLine(result.message); // Assert - string combinedFilePath = Path.Combine(tempDirectory, "combined.txt"); + Assert.That(result.status, Is.True); + string combinedFilePath = Path.Combine(tempDirectory, "result.txt"); // Default output file name Assert.That(File.Exists(combinedFilePath), Is.True); string combinedContent = File.ReadAllText(combinedFilePath); string[] lines = combinedContent.Split(new[] { '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries); @@ -59,31 +61,65 @@ public void CombineFiles_CreatesCombinedFile() } [Test] - public void CombineFilesBasic_CreatesCombinedFile() + public void CombineFiles_CreatesCombinedCsvFile() + { + dynamic fileCombiner = PythonEngineManager.FileCombineModule; + + // Arrange + string file1 = Path.Combine(tempDirectory, "file1.csv"); + string file2 = Path.Combine(tempDirectory, "file2.csv"); + File.WriteAllText(file1, "Header1,Header2\nValue1,Value2\nValue5,Value6\n"); + File.WriteAllText(file2, "Header1,Header2\nValue3,Value4\nValue7,Value8\n"); + + // Act + var result = fileCombiner.combine_files(folder_path: tempDirectory, file_extension:".csv",output_file_name: "combined.csv"); + + // Assert + string combinedFilePath = Path.Combine(tempDirectory, "combined.csv"); + Assert.That(File.Exists(combinedFilePath), Is.True); + string combinedContent = 
File.ReadAllText(combinedFilePath); + string[] lines = combinedContent.Split(new[] { '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries); + + // Check for the number of rows + Assert.That(lines.Length, Is.EqualTo(5)); // 1 header row + 4 data rows + + // Check for the content + Assert.That(combinedContent, Does.Contain("Header1,Header2")); + Assert.That(combinedContent, Does.Contain("Value1,Value2")); + Assert.That(combinedContent, Does.Contain("Value3,Value4")); + Assert.That(combinedContent, Does.Contain("Value5,Value6")); + Assert.That(combinedContent, Does.Contain("Value7,Value8")); + } + + [Test] + public void CombineFiles_CreatesCombinedTabSeparatedFile() { dynamic fileCombiner = PythonEngineManager.FileCombineModule; // Arrange string file1 = Path.Combine(tempDirectory, "file1.txt"); string file2 = Path.Combine(tempDirectory, "file2.txt"); - File.WriteAllText(file1, "Value1\n"); - File.WriteAllText(file2, "Value2\n"); + File.WriteAllText(file1, "Header1\tHeader2\nValue1\tValue2\n"); + File.WriteAllText(file2, "Header1\tHeader2\nValue3\tValue4\n"); // Act - fileCombiner.combine_files_basic(tempDirectory, "", "", ".txt", "combined.txt"); + var result = fileCombiner.combine_files(folder_path: tempDirectory, delimiter: "\t"); + Console.WriteLine(result.message); // Assert - string combinedFilePath = Path.Combine(tempDirectory, "combined.txt"); + Assert.That(result.status, Is.True); + string combinedFilePath = Path.Combine(tempDirectory, "result.txt"); // Default output file name Assert.That(File.Exists(combinedFilePath), Is.True); string combinedContent = File.ReadAllText(combinedFilePath); string[] lines = combinedContent.Split(new[] { '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries); // Check for the number of rows - Assert.That(lines.Length, Is.EqualTo(2)); // 2 data rows + Assert.That(lines.Length, Is.EqualTo(3)); // 1 header row + 2 data rows // Check for the content - Assert.That(combinedContent, Does.Contain("Value1")); - Assert.That(combinedContent, 
Does.Contain("Value2")); + Assert.That(combinedContent, Does.Contain("Header1\tHeader2")); + Assert.That(combinedContent, Does.Contain("Value1\tValue2")); + Assert.That(combinedContent, Does.Contain("Value3\tValue4")); } [Test] @@ -98,10 +134,11 @@ public void AppendToFile_AppendsContent() File.WriteAllText(appendFile, "Header1,Header2\nValue3,Value4\n"); // Act - bool result = fileCombiner.append_to_file(sourceFile, appendFile, true); + var result = fileCombiner.append_to_file(sourceFile, appendFile, true); // Assert - Assert.That(result, Is.True); + Console.WriteLine(result.message); + Assert.That(result.status, Is.True); string combinedContent = File.ReadAllText(sourceFile); string[] lines = combinedContent.Split(new[] { '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries); @@ -113,6 +150,62 @@ public void AppendToFile_AppendsContent() Assert.That(combinedContent, Does.Contain("Value3,Value4")); } + [Test] + public void AppendToFile_AppendsCsvContent() + { + dynamic fileCombiner = PythonEngineManager.FileCombineModule; + + // Arrange + string sourceFile = Path.Combine(tempDirectory, "source.csv"); + string appendFile = Path.Combine(tempDirectory, "append.csv"); + File.WriteAllText(sourceFile, "Header1,Header2\nValue1,Value2\n"); + File.WriteAllText(appendFile, "Header1,Header2\nValue3,Value4\n"); + + // Act + var result = fileCombiner.append_to_file(sourceFile, appendFile, true); + + // Assert + Console.WriteLine(result.message); + Assert.That(result.status, Is.True); + string combinedContent = File.ReadAllText(sourceFile); + string[] lines = combinedContent.Split(new[] { '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries); + + // Check for the number of rows + Assert.That(lines.Length, Is.EqualTo(3)); // 1 header row + 2 data rows + + // Check for the content + Assert.That(combinedContent, Does.Contain("Value1,Value2")); + Assert.That(combinedContent, Does.Contain("Value3,Value4")); + } + + [Test] + public void AppendToFile_AppendsTabSeparatedContent() + { + 
dynamic fileCombiner = PythonEngineManager.FileCombineModule; + + // Arrange + string sourceFile = Path.Combine(tempDirectory, "source.txt"); + string appendFile = Path.Combine(tempDirectory, "append.txt"); + File.WriteAllText(sourceFile, "Header1\tHeader2\nValue1\tValue2\n"); + File.WriteAllText(appendFile, "Header1\tHeader2\nValue3\tValue4\n"); + + // Act + var result = fileCombiner.append_to_file(sourceFile, appendFile, true); + + // Assert + Console.WriteLine(result.message); + Assert.That(result.status, Is.True); + string combinedContent = File.ReadAllText(sourceFile); + string[] lines = combinedContent.Split(new[] { '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries); + + // Check for the number of rows + Assert.That(lines.Length, Is.EqualTo(3)); // 1 header row + 2 data rows + + // Check for the content + Assert.That(combinedContent, Does.Contain("Value1\tValue2")); + Assert.That(combinedContent, Does.Contain("Value3\tValue4")); + } + [Test] public void CombineFilesHeaderIndependent_CreatesCombinedFile() {