From 971fc6a341da2017a2ad9925c6092a922990e7df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cristiano=20K=C3=B6hler?= <42555442+kohlerca@users.noreply.github.com> Date: Fri, 27 Oct 2023 17:37:07 +0200 Subject: [PATCH] [ENH] Improved output messages/status information of ASSET functions (#570) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * deprecated verbose parameter * use logging instead of print statements --------- Co-authored-by: Cristiano Köhler Co-authored-by: Moritz-Alexander-Kern Co-authored-by: Moritz Kern <92092328+Moritz-Alexander-Kern@users.noreply.github.com> --- elephant/asset/asset.py | 224 +++++++++++++++++++++++------------- elephant/test/test_asset.py | 25 +++- 2 files changed, 165 insertions(+), 84 deletions(-) diff --git a/elephant/asset/asset.py b/elephant/asset/asset.py index e4d7a6fed..c4968d0a4 100644 --- a/elephant/asset/asset.py +++ b/elephant/asset/asset.py @@ -49,7 +49,7 @@ * simulate two noisy synfire chains; * shuffle the neurons to destroy visual appearance; - * run ASSET analysis to recover the original neurons arrangement. + * run ASSET analysis to recover the original neurons' arrangement. 1. Simulate two noise synfire chains, shuffle the neurons to destroy the pattern visually, and store shuffled activations in neo.SpikeTrains. @@ -83,11 +83,6 @@ >>> pmat = asset_obj.probability_matrix_analytical(imat, ... kernel_width=50*pq.ms) - compute rates by boxcar-kernel convolution... - compute the prob. that each neuron fires in each pair of bins... - compute the probability matrix by Le Cam's approximation... - substitute 0.5 to elements along the main diagonal... - 5. Compute the joint probability matrix `jmat`, using a suitable filter: @@ -129,7 +124,6 @@ import math import os import subprocess -import sys import tempfile import warnings from pathlib import Path @@ -146,6 +140,7 @@ import elephant.conversion as conv from elephant import spike_train_surrogates from elephant.utils import get_cuda_capability_major, get_opencl_capability +import logging try: from mpi4py import MPI @@ -172,6 +167,14 @@ "get_sse_start_and_end_time_bins" ] +# Create logger and set configuration +logger = logging.getLogger(__file__) +log_handler = logging.StreamHandler() +log_handler.setFormatter( + logging.Formatter(f"[%(asctime)s] {__name__[__name__.rfind('.')+1::]} -" + " %(levelname)s: %(message)s")) +logger.addHandler(log_handler) +logger.propagate = False # ============================================================================= # Some Utility Functions to be dealt with in some way or another @@ -356,13 +359,13 @@ def _analog_signal_step_interp(signal, times): def _stretched_metric_2d(x, y, stretch, ref_angle, working_memory=None, - mapped_array_file=None, verbose=False): + mapped_array_file=None, verbose=None): r""" Given a list of points on the real plane, identified by their abscissa `x` and ordinate `y`, compute a stretched transformation of the Euclidean distance among each of them. - The classical euclidean distance `d` between points `(x1, y1)` and + The classical Euclidean distance `d` between points `(x1, y1)` and `(x2, y2)`, i.e., :math:`\sqrt((x1-x2)^2 + (y1-y2)^2)`, is multiplied by a factor @@ -406,9 +409,15 @@ def _stretched_metric_2d(x, y, stretch, ref_angle, working_memory=None, array). This option should be used when there is not enough memory to allocate the full stretched distance matrix needed before DBSCAN. Default: None - verbose : bool, optional - Display progress bars and log messages. 
-        Default: False
+    verbose : bool, optional
+        .. deprecated:: 0.14.0
+        This parameter is no longer functional. To control the verbosity
+        of log messages, please use the module's logger, which is based
+        on the standard logging module.
+        Logging is turned on by default (to level INFO). To restrict
+        logging messages, set a higher logging level, such as WARNING or
+        ERROR, e.g.::
+
+            import logging
+            from elephant.asset.asset import logger as asset_logger
+            asset_logger.setLevel(logging.WARNING)
+
+        Default: None
 
     Returns
     -------
@@ -422,6 +431,14 @@ def _stretched_metric_2d(x, y, stretch, ref_angle, working_memory=None,
         pairwise distances when using chunked computations.
 
     """
+    if verbose is not None:
+        warnings.warn("The 'verbose' parameter is deprecated and will be "
+                      "removed in the future. Its functionality is still "
+                      "available by using the logging module from Python. "
+                      "We recommend transitioning to the logging module "
+                      "for improved control and flexibility in handling "
+                      "verbosity levels.", DeprecationWarning)
+
     alpha = np.deg2rad(ref_angle)  # reference angle in radians
 
     # Create the array of points (one per row) for which to compute the
@@ -450,7 +467,10 @@ def calculate_stretch_mat(theta_mat, D_mat):
         return _stretch_mat
 
     if working_memory is None:
-        # Compute the matrix D[i, j] of euclidean distances among points
+
+        logger.info("Finding distances without chunking")
+
+        # Compute the matrix D[i, j] of Euclidean distances among points
         # i and j
         D = pairwise_distances(points)
 
@@ -487,10 +507,10 @@ def calculate_stretch_mat(theta_mat, D_mat):
         last_chunk = len(x) % estimated_chunk
         if last_chunk > 0:
             it_todo += 1
-        if verbose:
-            print(f"Estimated chunk size: {estimated_chunk}; "
-                  f"Dimension: ({len(x)}, {len(y)}), "
-                  f"Number of chunked iterations: {it_todo}")
+
+        logger.info(f"Estimated chunk size: {estimated_chunk}; "
+                    f"Dimension: ({len(x)}, {len(y)}), "
+                    f"Number of chunked iterations: {it_todo}")
 
         # x and y sizes are the same
         if mapped_array_file is None:
@@ -508,8 +528,8 @@ def calculate_stretch_mat(theta_mat, D_mat):
         else:
             # Using an array mapped to disk. Store in the file passed as
             # parameter
-            if verbose:
-                print(f"Creating disk array at '{mapped_array_file.name}'.")
+
+            logger.info(f"Creating disk array at '{mapped_array_file.name}'.")
 
             stretch_mat = np.memmap(mapped_array_file, mode='w+',
                                     shape=(len(x), len(y)),
@@ -523,7 +543,7 @@ def calculate_stretch_mat(theta_mat, D_mat):
                 pairwise_distances_chunked(points,
                                            working_memory=working_memory),
                 desc='Pairwise distances chunked',
-                total=it_todo, disable=not verbose):
+                total=it_todo):
 
             chunk_size = D_chunk.shape[0]
 
@@ -554,10 +574,17 @@ def calculate_stretch_mat(theta_mat, D_mat):
     return stretch_mat
 
 
-def _interpolate_signals(signals, sampling_times, verbose=False):
+def _interpolate_signals(signals, sampling_times, verbose=None):
     """
     Interpolate signals at given sampling times.
     """
+    if verbose is not None:
+        warnings.warn("The 'verbose' parameter is deprecated and will be "
+                      "removed in the future. Its functionality is still "
+                      "available by using the logging module from Python. "
+                      "We recommend transitioning to the logging module "
+                      "for improved control and flexibility in handling "
+                      "verbosity levels.", DeprecationWarning)
    # Reshape all signals to one-dimensional array object (e.g.
AnalogSignal) for i, signal in enumerate(signals): if signal.ndim == 2: @@ -565,8 +592,7 @@ def _interpolate_signals(signals, sampling_times, verbose=False): elif signal.ndim > 2: raise ValueError('elements in fir_rates must have 2 dimensions') - if verbose: - print('create time slices of the rates...') + logger.info("Create time slices of the rates...") # Interpolate in the time bins interpolated_signal = np.vstack([_analog_signal_step_interp( @@ -649,12 +675,19 @@ def _split_axis(self, chunk_size, axis_size, min_chunk_size=None): class _JSFUniformOrderStat3D(_GPUBackend): - def __init__(self, n, d, precision='float', verbose=False, + def __init__(self, n, d, precision='float', verbose=None, cuda_threads=64, cuda_cwr_loops=32, tolerance=1e-5, max_chunk_size=None): super().__init__(max_chunk_size=max_chunk_size) if d > n: raise ValueError(f"d ({d}) must be less or equal n ({n})") + if verbose is not None: + warnings.warn("The 'verbose' parameter is deprecated and will be " + "removed in the future. Its functionality is still " + "available by using the logging module from Python. " + "We recommend transitioning to the logging module " + "for improved control and flexibility in handling " + "verbosity levels.", DeprecationWarning) self.n = n self.d = d self.precision = precision @@ -751,8 +784,7 @@ def cpu(self, log_du): for iter_id, matrix_entries in enumerate( tqdm(self._combinations_with_replacement(), total=self.num_iterations, - desc="Joint survival function", - disable=not self.verbose)): + desc="Joint survival function")): # if we are running with MPI if mpi_accelerated and iter_id % size != rank: continue @@ -820,8 +852,8 @@ def pyopencl(self, log_du, device_id=0): u_length = log_du.shape[0] context = cl.create_some_context(interactive=False) - if self.verbose: - print("Available OpenCL devices:\n", context.devices) + + logger.info(f"Available OpenCL devices:\n {context.devices}") device = context.devices[device_id] # A queue bounded to the device @@ -872,10 +904,9 @@ def pyopencl(self, log_du, device_id=0): # grid_size must be at least l_num_blocks grid_size = l_num_blocks - if self.verbose: - print(f"[Joint prob. matrix] it_todo={it_todo}, " - f"grid_size={grid_size}, L_BLOCK={l_block}, " - f"N_THREADS={n_threads}") + logger.info(f"[Joint prob. matrix] it_todo={it_todo}, " + f"grid_size={grid_size}, L_BLOCK={l_block}, " + f"N_THREADS={n_threads}") # OpenCL defines unsigned long as uint64, therefore we're adding # the LU suffix, not LLU, which would indicate unsupported uint128 @@ -963,10 +994,9 @@ def pycuda(self, log_du): # grid_size must be at least l_num_blocks grid_size = l_num_blocks - if self.verbose: - print(f"[Joint prob. matrix] it_todo={it_todo}, " - f"grid_size={grid_size}, L_BLOCK={l_block}, " - f"N_THREADS={n_threads}") + logger.info(f"[Joint prob. 
matrix] it_todo={it_todo}, " + f"grid_size={grid_size}, L_BLOCK={l_block}, " + f"N_THREADS={n_threads}") asset_cu = self._compile_template( template_name="joint_pmat.cu", @@ -1011,8 +1041,7 @@ def _cuda(self, log_du): template_name="joint_pmat_old.cu", L=f"{log_du.shape[0]}LLU", N_THREADS=self.cuda_threads, - ITERATIONS_TODO=f"{self.num_iterations}LLU", - ASSET_DEBUG=int(self.verbose) + ITERATIONS_TODO=f"{self.num_iterations}LLU" ) with tempfile.TemporaryDirectory() as asset_tmp_folder: asset_cu_path = os.path.join(asset_tmp_folder, 'asset.cu') @@ -1030,9 +1059,9 @@ def _cuda(self, log_du): compile_status = subprocess.run( compile_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if self.verbose: - print(compile_status.stdout.decode()) - print(compile_status.stderr.decode(), file=sys.stderr) + + logger.info(compile_status.stdout.decode()) + logger.info(compile_status.stderr.decode()) compile_status.check_returncode() log_du_path = os.path.join(asset_tmp_folder, "log_du.dat") P_total_path = os.path.join(asset_tmp_folder, "P_total.dat") @@ -1041,9 +1070,9 @@ def _cuda(self, log_du): run_status = subprocess.run( [asset_bin_path, log_du_path, P_total_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if self.verbose: - print(run_status.stdout.decode()) - print(run_status.stderr.decode(), file=sys.stderr) + + logger.info(run_status.stdout.decode()) + logger.info(run_status.stderr.decode()) run_status.check_returncode() with open(P_total_path, 'rb') as f: P_total = np.fromfile(f, dtype=self.dtype) @@ -1117,10 +1146,19 @@ class _PMatNeighbors(_GPUBackend): The number of largest neighbors to collect for each entry in `mat`. """ - def __init__(self, filter_shape, n_largest, max_chunk_size=None): + def __init__(self, filter_shape, n_largest, max_chunk_size=None, + verbose=None): super().__init__(max_chunk_size=max_chunk_size) self.n_largest = n_largest self.max_chunk_size = max_chunk_size + if verbose is not None: + warnings.warn("The 'verbose' parameter is deprecated and will be " + "removed in the future. Its functionality is still " + "available by using the logging module from Python. " + "We recommend transitioning to the logging module " + "for improved control and flexibility in handling " + "verbosity levels.", DeprecationWarning) + self.verbose = verbose filter_size, filter_width = filter_shape if filter_width >= filter_size: @@ -1198,7 +1236,8 @@ def pyopencl(self, mat): dtype=np.float32 ) - for i_start, i_end in split_idx: + for i_start, i_end in tqdm(split_idx, total=len(split_idx), + desc="Largest neighbors OpenCL"): mat_gpu = cl_array.to_device(queue, mat[i_start: i_end + filt_size], async_=True) @@ -1266,6 +1305,7 @@ def pycuda(self, mat): lmat = lmat_padded[filt_size // 2: -filt_size // 2 + 1] free, total = drv.mem_get_info() + # 4 * size * n_cols * n_largest + 4 * (size + filt_size) * n_cols chunk_size = (free // 4 - filt_size * lmat.shape[1]) // ( lmat.shape[1] * (self.n_largest + 1)) @@ -1281,7 +1321,8 @@ def pycuda(self, mat): mat_gpu = drv.mem_alloc(4 * (chunk_size + filt_size) * mat.shape[1]) - for i_start, i_end in split_idx: + for i_start, i_end in tqdm(split_idx, total=len(split_idx), + desc="Largest neighbors CUDA"): drv.memcpy_htod_async(dest=mat_gpu, src=mat[i_start: i_end + filt_size]) lmat_gpu.fill(0) @@ -1328,10 +1369,10 @@ def compute(self, mat): `mat`. 
For each entry `mat[i, j]`, collects the `n_largest` elements with - largest values around `mat[i, j]`, say `z_i, i=1,2,...,n_largest`, + the largest values around `mat[i, j]`, say `z_i, i=1,2,...,n_largest`, and assigns them to `L[i, j, :]`. - The zone around `mat[i, j]` where largest neighbors are collected from - is a rectangular area (kernel) of shape `(l, w) = filter_shape` + The zone around `mat[i, j]` where the largest neighbors are collected + from is a rectangular area (kernel) of shape `(l, w) = filter_shape` centered around `mat[i, j]` and aligned along the diagonal. If `mat` is symmetric, only the triangle below the diagonal is @@ -1375,7 +1416,8 @@ def cpu(self, mat): bin_range_x = range(N_bin_x - filter_size + 1) # compute matrix of largest values - for y in bin_range_y: + for y in tqdm(bin_range_y, total=len(bin_range_y), + desc="Largest neighbors CPU"): if symmetric: # x range depends on y position bin_range_x = range(y - filter_size + 1) @@ -1684,7 +1726,7 @@ def synchronous_events_contained_in(sse1, sse2): # Return False if any pixel in sse1 is not contained in sse2, or if any # link of sse1 is not a subset of the corresponding link in sse2. - # Otherwise (if sse1 is a subset of sse2) continue + # Otherwise, if sse1 is a subset of sse2, continue for pixel1, link1 in sse11.items(): if pixel1 not in sse22.keys(): return False @@ -1743,7 +1785,7 @@ def synchronous_events_overlap(sse1, sse2): (see below), determines whether the two SSEs overlap. The SSEs overlap if they are not equal and none of them is a superset of - the other one but they are also not disjoint. + the other one, but they are also not disjoint. Both `sse1` and `sse2` must be provided as dictionaries of the type @@ -1972,9 +2014,6 @@ class ASSET(object): binning errors is performed. If a number, the binning will consider this value. Default: 'default' - verbose : bool, optional - If True, print messages and show progress bar. - Default: True Raises @@ -1993,7 +2032,16 @@ class ASSET(object): def __init__(self, spiketrains_i, spiketrains_j=None, bin_size=3 * pq.ms, t_start_i=None, t_start_j=None, t_stop_i=None, t_stop_j=None, - bin_tolerance='default', verbose=True): + bin_tolerance='default', verbose=None): + + if verbose is not None: + warnings.warn("The 'verbose' parameter is deprecated and will be " + "removed in the future. Its functionality is still " + "available by using the logging module from Python. 
" + "We recommend transitioning to the logging module " + "for improved control and flexibility in handling " + "verbosity levels.", DeprecationWarning) + self.spiketrains_i = spiketrains_i if spiketrains_j is None: spiketrains_j = spiketrains_i @@ -2209,8 +2257,7 @@ def probability_matrix_montecarlo(self, n_surrogates, imat=None, # equal to that of the original data pmat = np.zeros(imat.shape, dtype=np.int32) - for surr_id in trange(n_surrogates, desc="pmat_bootstrap", - disable=not self.verbose): + for surr_id in trange(n_surrogates, desc="pmat_bootstrap"): if mpi_accelerated and surr_id % size != rank: continue surrogates = [spike_train_surrogates.surrogates( @@ -2331,8 +2378,7 @@ def probability_matrix_analytical(self, imat=None, # for both axes, interpolate in the time bins of interest and # convert to Quantity fir_rate_x = _interpolate_signals( - firing_rates_x, self.spiketrains_binned_i.bin_edges[:-1], - self.verbose) + firing_rates_x, self.spiketrains_binned_i.bin_edges[:-1]) else: raise ValueError( 'fir_rates_x must be a list or the string "estimate"') @@ -2347,16 +2393,14 @@ def probability_matrix_analytical(self, imat=None, # for both axes, interpolate in the time bins of interest and # convert to Quantity fir_rate_y = _interpolate_signals( - firing_rates_y, self.spiketrains_binned_j.bin_edges[:-1], - self.verbose) + firing_rates_y, self.spiketrains_binned_j.bin_edges[:-1]) else: raise ValueError( 'fir_rates_y must be a list or the string "estimate"') # For each neuron, compute the prob. that that neuron spikes in any bin - if self.verbose: - print('compute the prob. that each neuron fires in each pair of ' - 'bins...') + logger.info("Compute the probability that each neuron fires in " + "each pair of bins...") rate_bins_x = (fir_rate_x * self.bin_size).simplified.magnitude spike_probs_x = 1. - np.exp(-rate_bins_x) @@ -2372,9 +2416,8 @@ def probability_matrix_analytical(self, imat=None, # matrices p_ijk computed for each neuron k: # p_ijk is the probability that neuron k spikes in both bins i and j. # The sum of outer products is equivalent to a dot product. - if self.verbose: - print( - "compute the probability matrix by Le Cam's approximation...") + logger.info("Compute the probability matrix by Le Cam's " + "approximation...") Mu = spike_probs_x.T.dot(spike_probs_y) # A straightforward implementation is: # pmat_shape = spike_probs_x.shape[1], spike_probs_y.shape[1] @@ -2388,8 +2431,8 @@ def probability_matrix_analytical(self, imat=None, if symmetric: # Substitute 0.5 to the elements along the main diagonal - if self.verbose: - print("substitute 0.5 to elements along the main diagonal...") + logger.info("Substitute 0.5 to elements along the main " + "diagonal...") np.fill_diagonal(pmat, 0.5) return pmat @@ -2464,7 +2507,7 @@ def joint_probability_matrix(self, pmat, filter_shape, n_largest, Tolerance is used to catch unexpected behavior of billions of floating point additions, when the number of iterations is huge or the data arrays are large. A warning is thrown when the - resulting joint prob. matrix values are outside of the acceptable + resulting joint prob. matrix values are outside the acceptable range ``[-tolerance, 1.0 + tolerance]``. Default: 1e-5 @@ -2478,7 +2521,7 @@ def joint_probability_matrix(self, pmat, filter_shape, n_largest, 1. By default, if CUDA is detected, CUDA acceleration is used. CUDA backend is **~X1000** faster than the Python implementation. To turn off CUDA features, set the environment flag - ``ELEPHANT_USE_CUDA`` to ``0``. 
Otherwise + ``ELEPHANT_USE_CUDA`` to ``0``. 2. If PyOpenCL is installed and detected, PyOpenCL backend is used. PyOpenCL backend is **~X100** faster than the Python implementation. To turn off OpenCL features, set the environment flag @@ -2495,6 +2538,8 @@ def joint_probability_matrix(self, pmat, filter_shape, n_largest, """ l, w = filter_shape + logger.info("Finding neighbors in probability matrix...") + # Find for each P_ij in the probability matrix its neighbors and # maximize them by the maximum value 1-p_value_min pmat = np.asarray(pmat, dtype=np.float32) @@ -2502,6 +2547,8 @@ def joint_probability_matrix(self, pmat, filter_shape, n_largest, n_largest=n_largest) pmat_neighb = pmat_neighb_obj.compute(pmat) + logger.info("Finding unique set of values...") + pmat_neighb = np.minimum(pmat_neighb, 1. - min_p_value, out=pmat_neighb) @@ -2518,7 +2565,6 @@ def joint_probability_matrix(self, pmat, filter_shape, n_largest, w + 1) # number of entries covered by kernel jsf = _JSFUniformOrderStat3D(n=n, d=pmat_neighb.shape[1], precision=precision, - verbose=self.verbose, cuda_threads=cuda_threads, cuda_cwr_loops=cuda_cwr_loops, tolerance=tolerance) @@ -2587,7 +2633,7 @@ def mask_matrices(matrices, thresholds): @staticmethod def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors, stretch, working_memory=None, array_file=None, - keep_file=False, verbose=False): + keep_file=False, verbose=None): r""" Given a matrix `mask_matrix`, replaces its positive elements with integers representing different cluster IDs. Each cluster comprises @@ -2634,7 +2680,7 @@ def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors, min_neighbors : int The minimum number of elements to form a neighbourhood. stretch : float - The stretching factor of the euclidean metric for elements aligned + The stretching factor of the Euclidean metric for elements aligned along the 135 degree direction (anti-diagonal). The actual stretching increases from 1 to `stretch` as the direction of the two elements moves from the 45 to the 135 degree direction. @@ -2663,9 +2709,6 @@ def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors, This option can be used to access the distance matrix after the clustering. Default: False - verbose : bool, optional - Display log messages and progress bars. - Default: False Returns ------- @@ -2684,6 +2727,14 @@ def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors, sklearn.cluster.DBSCAN """ + + if verbose is not None: + warnings.warn("The 'verbose' parameter is deprecated and will be " + "removed in the future. Its functionality is still " + "available by using the logging module from Python. " + "We recommend transitioning to the logging module " + "for improved control and flexibility in handling " + "verbosity levels.", DeprecationWarning) # Don't do anything if mat is identically zero if np.all(mask_matrix == 0): return mask_matrix @@ -2708,7 +2759,7 @@ def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors, D = _stretched_metric_2d( xpos_sgnf, ypos_sgnf, stretch=stretch, ref_angle=45, working_memory=working_memory, - mapped_array_file=mapped_array_file, verbose=verbose) + mapped_array_file=mapped_array_file) except MemoryError as err: raise MemoryError("Set 'working_memory=100' or another value to " "chunk the data. If this does not solve, use the" @@ -2716,11 +2767,15 @@ def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors, "a temporary file to map the array to the disk." 
                              ) from err
 
+        logger.info("Running DBSCAN")
+
         # Cluster positions of significant pixels via dbscan
         core_samples, config = dbscan(
             D, eps=max_distance, min_samples=min_neighbors,
             metric='precomputed')
 
+        logger.info("Building cluster matrix")
+
         # Construct the clustered matrix, where each element has value
         # * i = 1 to k if it belongs to a cluster i,
         # * 0 if it is not significant,
@@ -2773,6 +2828,7 @@ def extract_synchronous_events(self, cmat, ids=None):
             return {}
 
         # Compute the transactions associated to the two binnings
+        logger.info("Finding transactions")
         tracts_x = _transactions(
             self.spiketrains_i, bin_size=self.bin_size,
             t_start=self.t_start_i, t_stop=self.t_stop_i,
@@ -2789,7 +2845,9 @@
 
         # Reconstruct each worm, link by link
         sse_dict = {}
-        for k in range(1, nr_worms + 1):  # for each worm
+        for k in tqdm(range(1, nr_worms + 1),
+                      total=nr_worms,
+                      desc="Extracting SSEs"):  # for each worm
             # worm k is a list of links (each link will be 1 sublist)
             worm_k = {}
             pos_worm_k = np.array(
@@ -2814,8 +2872,8 @@ def _rate_of_binned_spiketrain(self, binned_spiketrains, kernel_width):
         Calculate the rate of binned spiketrains using convolution with
         a boxcar kernel.
         """
-        if self.verbose:
-            print('compute rates by boxcar-kernel convolution...')
+
+        logger.info("Compute rates by boxcar-kernel convolution...")
 
         # Create the boxcar kernel and convolve it with the binned spike trains
         k = int((kernel_width / self.bin_size).simplified.item())
diff --git a/elephant/test/test_asset.py b/elephant/test/test_asset.py
index 22a9ab0db..5b7458114 100644
--- a/elephant/test/test_asset.py
+++ b/elephant/test/test_asset.py
@@ -129,6 +129,12 @@ def test_stretched_metric_2d_size(self):
         D = stretchedmetric2d(x, x, stretch=1, ref_angle=45)
         self.assertEqual(D.shape, (nr_points, nr_points))
 
+    def test_stretched_metric_2d_deprecation_warning(self):
+        nr_points = 4
+        x = np.arange(nr_points)
+        with self.assertWarns(DeprecationWarning):
+            stretchedmetric2d(x, x, stretch=1, ref_angle=45, verbose=True)
+
     def test_stretched_metric_2d_correct_stretching(self):
         x = (0, 1, 0)
         y = (0, 0, 1)
@@ -154,7 +160,7 @@ def test_stretched_metric_2d_equals_euclidean_if_stretch_1(self):
         # Compute Euclidean distance matrix
         points = np.vstack([x, y]).T
         E = scipy.spatial.distance_matrix(points, points)
-        # assert D == E
+        # assert that D equals E
         assert_array_almost_equal(D, E, decimal=5)
 
     def test_get_sse_start_and_end_time_bins(self):
@@ -388,6 +394,11 @@ def test_pmat_neighbors_gpu_overlapped_chunks(self):
             lmat_cuda = pmat_neigh.pycuda(pmat)
             assert_array_almost_equal(lmat_cuda, lmat_true)
 
+    def test_pmat_neighbors_deprecation_warning(self):
+        with self.assertWarns(DeprecationWarning):
+            asset._PMatNeighbors(filter_shape=(11, 5), n_largest=3,
+                                 max_chunk_size=12, verbose=True)
+
     def test_pmat_neighbors_invalid_input(self):
         np.random.seed(12)
         pmat = np.random.random_sample((20, 20))
@@ -533,6 +544,14 @@ def pyopencl(self):
 
         os.environ['ELEPHANT_USE_OPENCL'] = '0'
 
+    def test_asset_deprecation_warning(self):
+        st1 = neo.SpikeTrain([1, 2, 4] * pq.ms, t_stop=6 * pq.ms)
+        st2 = neo.SpikeTrain([1, 3, 4] * pq.ms, t_stop=6 * pq.ms)
+        bin_size = 1 * pq.ms
+        with self.assertWarns(DeprecationWarning):
+            asset.ASSET([st1, st2], bin_size=bin_size, t_stop_i=5 * pq.ms,
+                        t_stop_j=5 * pq.ms, verbose=True)
+
 
 @unittest.skipUnless(HAVE_SKLEARN, 'requires sklearn')
 class TestJSFUniformOrderStat3D(unittest.TestCase):
@@ -602,6 +621,10 @@ def test_invalid_values(self):
         u = np.empty((3, d + 1))
self.assertRaises(ValueError, jsf.compute, u=u) + def test_JSFUniformOrderStat3D_deprecation_warning(self): + with self.assertWarns(DeprecationWarning): + asset._JSFUniformOrderStat3D(n=5, d=1, verbose=True) + def test_point_mass_output(self): # When N >> D, the expected output is [1, 0] L, N, D = 2, 50, 2
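
Usage note: after this patch, the verbosity of all ASSET functions is
controlled through the module-level `logger` configured at the top of
`elephant/asset/asset.py`, not through per-function `verbose` flags. A
minimal sketch of how calling code might adjust it (assumes only the
`logger` object added in this patch and the standard-library `logging`
module)::

    import logging

    # The module logger added by this patch; INFO messages such as
    # "Running DBSCAN" or "Finding transactions" are emitted by default.
    from elephant.asset.asset import logger as asset_logger

    # Keep only warnings and errors from ASSET.
    asset_logger.setLevel(logging.WARNING)

Because the patch sets `logger.propagate = False` and attaches a dedicated
`StreamHandler`, configuring the root logger (e.g. via
`logging.basicConfig`) does not affect these messages; the level must be
set on the ASSET logger itself. Passing `verbose` to the deprecated
signatures still works, but it now only triggers the `DeprecationWarning`
exercised by the new tests and no longer changes the output.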