From ac981dd2d383db7c4bc64e9a3b1ebaf964fd69e3 Mon Sep 17 00:00:00 2001
From: David Hassell
Date: Wed, 30 Oct 2024 17:16:40 +0000
Subject: [PATCH] Remove REVIEW comments

---
 cf/cfimplementation.py                       |  3 --
 cf/constants.py                              |  1 -
 cf/data/array/__init__.py                    |  8 ----
 cf/data/array/cfah5netcdfarray.py            |  1 -
 cf/data/array/cfanetcdf4array.py             |  1 -
 cf/data/array/fullarray.py                   |  3 --
 cf/data/array/h5netcdfarray.py               |  3 --
 cf/data/array/locks.py                       |  1 -
 cf/data/array/mixin/__init__.py              |  3 --
 cf/data/array/mixin/activestoragemixin.py    |  3 --
 cf/data/array/mixin/arraymixin.py            |  1 -
 cf/data/array/mixin/cfamixin.py              |  3 --
 cf/data/array/mixin/indexmixin.py            |  1 -
 cf/data/array/netcdf4array.py                |  3 --
 cf/data/array/netcdfarray.py                 |  1 -
 cf/data/array/umarray.py                     |  5 ---
 cf/data/collapse/collapse_active.py          |  1 -
 cf/data/collapse/dask_collapse.py            | 12 -----
 cf/data/creation.py                          |  3 --
 cf/data/dask_regrid.py                       |  2 -
 cf/data/dask_utils.py                        | 13 ------
 cf/data/data.py                              | 46 --------------------
 cf/data/fragment/__init__.py                 |  4 --
 cf/data/fragment/fullfragmentarray.py        |  1 -
 cf/data/fragment/h5netcdffragmentarray.py    |  1 -
 cf/data/fragment/mixin/fragmentarraymixin.py |  2 -
 cf/data/fragment/netcdf4fragmentarray.py     |  1 -
 cf/data/fragment/netcdffragmentarray.py      |  3 --
 cf/data/fragment/umfragmentarray.py          |  2 -
 cf/data/utils.py                             |  4 --
 cf/field.py                                  |  2 -
 cf/functions.py                              | 15 -------
 cf/read_write/netcdf/netcdfread.py           |  8 ----
 cf/read_write/netcdf/netcdfwrite.py          |  5 ---
 cf/read_write/read.py                        |  4 --
 cf/read_write/um/umread.py                   |  1 -
 cf/read_write/write.py                       |  1 -
 cf/regrid/regrid.py                          |  1 -
 cf/regrid/regridoperator.py                  |  1 -
 39 files changed, 174 deletions(-)

diff --git a/cf/cfimplementation.py b/cf/cfimplementation.py
index b15a3929d9..db8601627c 100644
--- a/cf/cfimplementation.py
+++ b/cf/cfimplementation.py
@@ -27,7 +27,6 @@
 )
 from .data import Data
 
-# REVIEW: h5: `cfimplementation.py`: import `CFAH5netcdfArray`, `CFANetCDF4Array`, `H5netcdfArray`,`NetCDF4Array`
 from .data.array import (
     BoundsFromNodesArray,
     CellConnectivityArray,
@@ -116,7 +115,6 @@ def set_construct(self, parent, construct, axes=None, copy=True, **kwargs):
             parent, construct, axes=axes, copy=copy, **kwargs
         )
 
-    # REVIEW: h5: `initialise_CFANetCDF4Array`: new method to initialise `CFANetCDF4Array`
     def initialise_CFANetCDF4Array(self, **kwargs):
         """Return a `CFANetCDF4Array` instance.
 
@@ -133,7 +131,6 @@ def initialise_CFANetCDF4Array(self, **kwargs):
         cls = self.get_class("CFANetCDF4Array")
         return cls(**kwargs)
 
-    # REVIEW: h5: `initialise_CFAH5netcdfArray`: new method to initialise `CFAH5netcdfArray`
     def initialise_CFAH5netcdfArray(self, **kwargs):
         """Return a `CFAH5netcdfArray` instance.
 
diff --git a/cf/constants.py b/cf/constants.py
index 3275a834f4..1472bd83d2 100644
--- a/cf/constants.py
+++ b/cf/constants.py
@@ -63,7 +63,6 @@
     "LOG_LEVEL": logging.getLevelName(logging.getLogger().level),
     "BOUNDS_COMBINATION_MODE": "AND",
     "CHUNKSIZE": parse_bytes(_CHUNKSIZE),
-    # REVIEW: active: `CONSTANTS`: new constants 'active_storage', 'active_storage_url', 'active_storage_max_requests'
     "active_storage": False,
     "active_storage_url": None,
     "active_storage_max_requests": 100,
diff --git a/cf/data/array/__init__.py b/cf/data/array/__init__.py
index c57a72081a..cd2c53766b 100644
--- a/cf/data/array/__init__.py
+++ b/cf/data/array/__init__.py
@@ -1,19 +1,11 @@
 from .boundsfromnodesarray import BoundsFromNodesArray
 from .cellconnectivityarray import CellConnectivityArray
-
-# REVIEW: h5: `__init__.py`: import `CFAH5netcdfArray`
 from .cfah5netcdfarray import CFAH5netcdfArray
-
-# REVIEW: h5: `__init__.py`: import `CFAH5netcdfArray`
 from .cfanetcdf4array import CFANetCDF4Array
 from .fullarray import FullArray
 from .gatheredarray import GatheredArray
-
-# REVIEW: h5: `__init__.py`: import `H5netcdfArray`
 from .h5netcdfarray import H5netcdfArray
 from .netcdfarray import NetCDFArray
-
-# REVIEW: h5: `__init__.py`: import `NetCDF4Array`
 from .netcdf4array import NetCDF4Array
 from .pointtopologyarray import PointTopologyArray
 from .raggedcontiguousarray import RaggedContiguousArray
diff --git a/cf/data/array/cfah5netcdfarray.py b/cf/data/array/cfah5netcdfarray.py
index edcdfceeda..47c58bff06 100644
--- a/cf/data/array/cfah5netcdfarray.py
+++ b/cf/data/array/cfah5netcdfarray.py
@@ -1,4 +1,3 @@
-# REVIEW: h5: `CFAH5netcdfArray`: New class for accessing CFA with `h5netcdf`
 from .h5netcdfarray import H5netcdfArray
 from .mixin import CFAMixin
 
diff --git a/cf/data/array/cfanetcdf4array.py b/cf/data/array/cfanetcdf4array.py
index 73d926b44b..b3b6b69d7a 100644
--- a/cf/data/array/cfanetcdf4array.py
+++ b/cf/data/array/cfanetcdf4array.py
@@ -1,4 +1,3 @@
-# REVIEW: h5: `CFAnetCDF4Array`: New class for accessing CFA with `netCDF4`
 from .mixin import CFAMixin
 from .netcdf4array import NetCDF4Array
 
diff --git a/cf/data/array/fullarray.py b/cf/data/array/fullarray.py
index 11f9a26aa1..81278c3407 100644
--- a/cf/data/array/fullarray.py
+++ b/cf/data/array/fullarray.py
@@ -16,7 +16,6 @@ class FullArray(IndexMixin, Array):
 
     """
 
-    # REVIEW: h5: `__init__`: replace units/calendar API with attributes
     def __init__(
         self,
         fill_value=None,
@@ -121,7 +120,6 @@ def __str__(self):
 
         return f"Filled with {fill_value!r}"
 
-    # REVIEW: getitem: `_get_array`: new method to convert subspace to numpy array
     def _get_array(self, index=None):
         """Returns the full array.
 
@@ -156,7 +154,6 @@
 
         return array
 
-    # REVIEW: getitem: `array`: New property to convert subspace to numpy array
     @property
     def array(self):
         """Return an independent numpy array containing the data.
diff --git a/cf/data/array/h5netcdfarray.py b/cf/data/array/h5netcdfarray.py
index f4355ac4f0..02cd0f1cc5 100644
--- a/cf/data/array/h5netcdfarray.py
+++ b/cf/data/array/h5netcdfarray.py
@@ -1,4 +1,3 @@
-# REVIEW: h5: `H5netcdfArray`: New class to access netCDF with `h5netcdf`
 import cfdm
 
 from ...mixin_container import Container
@@ -49,8 +48,6 @@ def _lock(self):
         """
         return netcdf_lock
 
-    # REVIEW: h5: `_get_array`: Ignore this for h5 review
-    # REVIEW: getitem: `_get_array`: new method to convert subspace to numpy array.
     def _get_array(self, index=None):
         """Returns a subspace of the dataset variable.
 
diff --git a/cf/data/array/locks.py b/cf/data/array/locks.py
index efa0114699..5a7b2bd333 100644
--- a/cf/data/array/locks.py
+++ b/cf/data/array/locks.py
@@ -1,4 +1,3 @@
-# REVIEW: h5: `locks.py`: New module to provide file locks
 from dask.utils import SerializableLock
 
 # Global lock for netCDF file access
diff --git a/cf/data/array/mixin/__init__.py b/cf/data/array/mixin/__init__.py
index 7db2cd73e8..8e5dd7690d 100644
--- a/cf/data/array/mixin/__init__.py
+++ b/cf/data/array/mixin/__init__.py
@@ -1,9 +1,6 @@
-# REVIEW: active: `__init__.py`: import `ActiveStorageMixin`
 from .activestoragemixin import ActiveStorageMixin
 from .arraymixin import ArrayMixin
 from .cfamixin import CFAMixin
 from .compressedarraymixin import CompressedArrayMixin
 from .filearraymixin import FileArrayMixin
-
-# REVIEW: getitem: `__init__.py`: import `IndexMixin`
 from .indexmixin import IndexMixin
diff --git a/cf/data/array/mixin/activestoragemixin.py b/cf/data/array/mixin/activestoragemixin.py
index 9843d09858..024bb05d04 100644
--- a/cf/data/array/mixin/activestoragemixin.py
+++ b/cf/data/array/mixin/activestoragemixin.py
@@ -1,6 +1,3 @@
-# REVIEW: active: `ActiveStorageMixin`: new mixin class `ActiveStorageMixin`
-
-
 class ActiveStorageMixin:
     """Mixin class for enabling active storage operations.
 
diff --git a/cf/data/array/mixin/arraymixin.py b/cf/data/array/mixin/arraymixin.py
index 41ad6b5d6b..3468253b36 100644
--- a/cf/data/array/mixin/arraymixin.py
+++ b/cf/data/array/mixin/arraymixin.py
@@ -18,7 +18,6 @@ def __array_function__(self, func, types, args, kwargs):
         """
         return NotImplemented
 
-    # REVIEW: active: `_meta`: Moved to here from `FileArrayMixin`
     @property
     def _meta(self):
         """Normalise the array to an appropriate Dask meta object.
diff --git a/cf/data/array/mixin/cfamixin.py b/cf/data/array/mixin/cfamixin.py
index 43fc23cf85..6bcb01a468 100644
--- a/cf/data/array/mixin/cfamixin.py
+++ b/cf/data/array/mixin/cfamixin.py
@@ -36,7 +36,6 @@ def __new__(cls, *args, **kwargs):
         }
         return instance
 
-    # REVIEW: h5: `__init__`: replace units/calendar API with attributes
     def __init__(
         self,
         filename=None,
@@ -225,7 +224,6 @@ def __init__(
                 "substitutions", substitutions.copy(), copy=False
             )
 
-    # REVIEW: h5: `_parse_cfa`: Refactoring of code that used to be in `__init__`
     def _parse_cfa(self, x, term, substitutions):
         """Parse the CFA aggregation instructions.
 
@@ -466,7 +464,6 @@ def get_fragment_shape(self):
 
         """
         return self._get_component("fragment_shape")
 
-    # REVIEW: h5: `get_storage_options`: new method to get file access options
     def get_storage_options(self):
         """Return `s3fs.S3FileSystem` options for accessing S3 fragment files.
diff --git a/cf/data/array/mixin/indexmixin.py b/cf/data/array/mixin/indexmixin.py
index 4cf2ad18b1..d105ba943a 100644
--- a/cf/data/array/mixin/indexmixin.py
+++ b/cf/data/array/mixin/indexmixin.py
@@ -7,7 +7,6 @@
 from ....functions import indices_shape, parse_indices
 
 
-# REVIEW: getitem: `IndexMixin`: new mixin class `IndexMixin`
 class IndexMixin:
     """Mixin class for lazy indexing of a data array.
 
diff --git a/cf/data/array/netcdf4array.py b/cf/data/array/netcdf4array.py
index ece5f3d3c4..095bf2d3ad 100644
--- a/cf/data/array/netcdf4array.py
+++ b/cf/data/array/netcdf4array.py
@@ -1,4 +1,3 @@
-# REVIEW: h5: `NetCDF4Array`: New class to access netCDF with `netCDF4`, replaces `NetCDFArray`
 import cfdm
 
 from ...mixin_container import Container
@@ -47,8 +46,6 @@ def _lock(self):
         """
         return netcdf_lock
 
-    # REVIEW: getitem: `_get_array`: Ignore this for h5 review
-    # REVIEW: getitem: `_get_array`: new method to convert subspace to numpy array
     def _get_array(self, index=None):
         """Returns a subspace of the dataset variable.
 
diff --git a/cf/data/array/netcdfarray.py b/cf/data/array/netcdfarray.py
index fab088f6f3..4642dff910 100644
--- a/cf/data/array/netcdfarray.py
+++ b/cf/data/array/netcdfarray.py
@@ -1,4 +1,3 @@
-# REVIEW: h5: `NetCDFArray`: Replaced by `NetCDF4Array`
 class NetCDFArray:
     """A netCDF array accessed with `netCDF4`.
 
diff --git a/cf/data/array/umarray.py b/cf/data/array/umarray.py
index bc30acb692..510b9c97ee 100644
--- a/cf/data/array/umarray.py
+++ b/cf/data/array/umarray.py
@@ -12,7 +12,6 @@ class UMArray(
 ):
     """A sub-array stored in a PP or UM fields file."""
 
-    # REVIEW: h5: `__init__`: replace units/calendar API with attributes
     def __init__(
         self,
         filename=None,
@@ -171,7 +170,6 @@ def __init__(
         # By default, close the UM file after data array access
         self._set_component("close", True, copy=False)
 
-    # REVIEW: getitem: `_get_array`: new method to convert subspace to numpy array
     def _get_array(self, index=None):
         """Returns a subspace of the dataset variable.
 
@@ -272,7 +270,6 @@ def _get_rec(self, f, header_offset):
         #        if r.hdr_offset == header_offset:
         #            return r
 
-    # REVIEW: getitem: `_set_FillValue`: record _FillValue in attributes
     def _set_FillValue(self, int_hdr, real_hdr, attributes):
         """Set the ``_FillValue`` attribute.
 
@@ -367,10 +364,8 @@ def _set_units(self, int_hdr, attributes):
                     units = units0
                     break
 
-        # REVIEW: getitem: `_set_units`: record units in attributes
         attributes["units"] = units
 
-    # REVIEW: getitem: `_set_unpack`: record unpack in attributes
     def _set_unpack(self, int_hdr, real_hdr, attributes):
         """Set the ``add_offset`` and ``scale_factor`` attributes.
 
diff --git a/cf/data/collapse/collapse_active.py b/cf/data/collapse/collapse_active.py
index 97dc955d38..db8fc277f6 100644
--- a/cf/data/collapse/collapse_active.py
+++ b/cf/data/collapse/collapse_active.py
@@ -1,4 +1,3 @@
-# REVIEW: active: `collapse_active.py`: new module for active storage functionality
 import datetime
 import logging
 import time
diff --git a/cf/data/collapse/dask_collapse.py b/cf/data/collapse/dask_collapse.py
index b58f9daf00..9476ffcb0b 100644
--- a/cf/data/collapse/dask_collapse.py
+++ b/cf/data/collapse/dask_collapse.py
@@ -1,4 +1,3 @@
-# REVIEW: active: `dask_collapse.py`: all unlabelled changes in this module are general tidying, and should be reviewed at the same time as active storage
 """Reduction functions intended to be passed to be dask.
 
 Most of these functions are expected to be passed to
@@ -231,7 +230,6 @@ def sum_sample_sizes(pairs, axis, computing_meta=False, **kwargs):
 # --------------------------------------------------------------------
 # mean
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_mean_chunk`: active storage decoration
 @actify("mean")
 def cf_mean_chunk(
     x,
@@ -378,7 +376,6 @@ def cf_mean_agg(
 # --------------------------------------------------------------------
 # maximum
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_max_chunk`: active storage decoration
 @actify("max")
 def cf_max_chunk(x, dtype=None, computing_meta=False, **kwargs):
     """Chunk calculations for the maximum.
@@ -533,7 +530,6 @@ def cf_mid_range_agg(
 # --------------------------------------------------------------------
 # minimum
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_min_chunk`: active storage decoration
 @actify("min")
 def cf_min_chunk(x, dtype=None, computing_meta=False, **kwargs):
     """Chunk calculations for the minimum.
@@ -640,7 +636,6 @@ def cf_min_agg(
 # --------------------------------------------------------------------
 # range
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_range_chunk`: active storage decoration
 @actify("range")
 def cf_range_chunk(x, dtype=None, computing_meta=False, **kwargs):
     """Chunk calculations for the range.
@@ -754,7 +749,6 @@ def cf_range_agg(
 # --------------------------------------------------------------------
 # root mean square
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_rms_chunk`: active storage decoration
 @actify("rms")
 def cf_rms_chunk(x, weights=None, dtype="f8", computing_meta=False, **kwargs):
     """Chunk calculations for the root mean square (RMS).
@@ -839,7 +833,6 @@ def cf_rms_agg(
 # --------------------------------------------------------------------
 # sample size
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_sample_size_chunk`: active storage decoration
 @actify("sample_size")
 def cf_sample_size_chunk(x, dtype="i8", computing_meta=False, **kwargs):
     """Chunk calculations for the sample size.
@@ -953,7 +946,6 @@ def cf_sample_size_agg(
 # --------------------------------------------------------------------
 # sum
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_sum_chunk`: active storage decoration
 @actify("sum")
 def cf_sum_chunk(
     x,
@@ -1089,7 +1081,6 @@ def cf_sum_agg(
 # --------------------------------------------------------------------
 # sum of weights
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_sum_of_weights_chunk`: active storage decoration
 @actify("sum_of_weights")
 def cf_sum_of_weights_chunk(
     x, weights=None, dtype="f8", computing_meta=False, **kwargs
@@ -1133,7 +1124,6 @@ def cf_sum_of_weights_chunk(
 # --------------------------------------------------------------------
 # sum of squares of weights
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_sum_of_weights2_chunk`: active storage decoration
 @actify("sum_of_weights2")
 def cf_sum_of_weights2_chunk(
     x, weights=None, dtype="f8", computing_meta=False, **kwargs
@@ -1179,7 +1169,6 @@ def cf_sum_of_weights2_chunk(
 # --------------------------------------------------------------------
 # unique
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_unique_chunk`: active storage decoration
 @actify("unique")
 def cf_unique_chunk(x, dtype=None, computing_meta=False, **kwargs):
     """Chunk calculations for the unique values.
@@ -1244,7 +1233,6 @@ def cf_unique_agg(pairs, axis=None, computing_meta=False, **kwargs):
 # --------------------------------------------------------------------
 # variance
 # --------------------------------------------------------------------
-# REVIEW: active: `cf_var_chunk`: active storage decoration
 @actify("var")
 def cf_var_chunk(
     x, weights=None, dtype="f8", computing_meta=False, ddof=None, **kwargs
diff --git a/cf/data/creation.py b/cf/data/creation.py
index 0d4067a373..f8ddcb9b97 100644
--- a/cf/data/creation.py
+++ b/cf/data/creation.py
@@ -60,7 +60,6 @@ def to_dask(array, chunks, **from_array_options):
     if is_dask_collection(array):
         return array
 
-    # REVIEW: getitem: `to_dask`: set '_asanyarray'
     if hasattr(array, "to_dask_array"):
         try:
             return array.to_dask_array(chunks=chunks)
@@ -83,8 +82,6 @@ def to_dask(array, chunks, **from_array_options):
         array = np.asanyarray(array)
 
     kwargs = from_array_options
-    # REVIEW: active: `to_dask`: '_dask_meta' renamed to '_meta' for consistency with Dask
-    # REVIEW: getitem: `to_dask`: The file lock is now on the `Array` object (in its `_get_array` method), rather than being set on the Dask array itself.
     kwargs.setdefault("meta", getattr(array, "_meta", None))
 
     try:
diff --git a/cf/data/dask_regrid.py b/cf/data/dask_regrid.py
index 6c3cab5fc6..f825fe1488 100644
--- a/cf/data/dask_regrid.py
+++ b/cf/data/dask_regrid.py
@@ -2,7 +2,6 @@
 
 import numpy as np
 
-# REVIEW: getitem: `regrid.py`: import `cf_asanyarray`
 from .dask_utils import cf_asanyarray
 
 
@@ -177,7 +176,6 @@ def regrid(
 
     """
     weights, dst_mask = weights_dst_mask
 
-    # REVIEW: getitem: `regrid`: convert a to a usable array
     a = cf_asanyarray(a)
     if dst_mask is not None:
         dst_mask = cf_asanyarray(dst_mask)
diff --git a/cf/data/dask_utils.py b/cf/data/dask_utils.py
index c50e16d85f..8323bf696c 100644
--- a/cf/data/dask_utils.py
+++ b/cf/data/dask_utils.py
@@ -127,7 +127,6 @@ def cf_contains(a, value):
         value.
""" - # REVIEW: getitem: `cf_contains`: convert a to a usable array a = cf_asanyarray(a) value = cf_asanyarray(value) return np.array(value in a).reshape((1,) * a.ndim) @@ -163,7 +162,6 @@ def cf_convolve1d(a, window=None, axis=-1, origin=0): Convolved float array with same shape as input. """ - # REVIEW: getitem: `cf_convolve1d`: convert a to a usable array a = cf_asanyarray(a) # Cast to float to ensure that NaNs can be stored @@ -207,7 +205,6 @@ def cf_harden_mask(a): The array with hardened mask. """ - # REVIEW: getitem: `cf_harden_mask`: convert a to a usable array a = cf_asanyarray(a) if np.ma.isMA(a): try: @@ -279,7 +276,6 @@ def cf_percentile(a, q, axis, method, keepdims=False, mtol=1): """ from math import prod - # REVIEW: getitem: `cf_percentile`: convert a to a usable array a = cf_asanyarray(a) if np.ma.isMA(a) and not np.ma.is_masked(a): @@ -375,7 +371,6 @@ def cf_soften_mask(a): The array with softened mask. """ - # REVIEW: getitem: `cf_soften_mask`: convert a to a usable array a = cf_asanyarray(a) if np.ma.isMA(a): @@ -433,7 +428,6 @@ def cf_where(array, condition, x, y, hardmask): elsewhere. """ - # REVIEW: getitem: `cf_where`: convert array, condition, x, y to usable arrays array = cf_asanyarray(array) condition = cf_asanyarray(condition) if x is not None: @@ -570,7 +564,6 @@ def cf_rt2dt(a, units): cftime.DatetimeGregorian(2000, 1, 2, 0, 0, 0, 0, has_year_zero=False)] """ - # REVIEW: getitem: `cf_rt2dt`: convert a to a usable array a = cf_asanyarray(a) if not units.iscalendartime: return rt2dt(a, units_in=units) @@ -626,7 +619,6 @@ def cf_dt2rt(a, units): [365 366] """ - # REVIEW: getitem: `cf_dt2rt`: convert a to a usable array a = cf_asanyarray(a) return dt2rt(a, units_out=units, units_in=None) @@ -668,7 +660,6 @@ def cf_units(a, from_units, to_units): [1000. 2000.] """ - # REVIEW: getitem: `cf_units`: convert a to a usable array a = cf_asanyarray(a) return Units.conform( a, from_units=from_units, to_units=to_units, inplace=False @@ -693,7 +684,6 @@ def cf_is_masked(a): values. """ - # REVIEW: getitem: `cf_is_masked`: convert a to a usable array a = cf_asanyarray(a) out = np.ma.is_masked(a) return np.array(out).reshape((1,) * a.ndim) @@ -727,12 +717,10 @@ def cf_filled(a, fill_value=None): [[-999 2 3]] """ - # REVIEW: getitem: `cf_filled`: convert a to a usable array a = cf_asanyarray(a) return np.ma.filled(a, fill_value=fill_value) -# REVIEW: getitem: `cf_asanyarray`: convert a to a usable array def cf_asanyarray(a): """Convert to a `numpy` array. @@ -752,7 +740,6 @@ def cf_asanyarray(a): unchanged if ``a.__asanyarray__`` False. 
""" - # REVIEW: getitem: `cf_asanyarray`: convert a to a usable array if getattr(a, "__asanyarray__", False): return np.asanyarray(a) diff --git a/cf/data/data.py b/cf/data/data.py index d310e37aec..61abf86bc7 100644 --- a/cf/data/data.py +++ b/cf/data/data.py @@ -44,7 +44,6 @@ from .collapse import Collapse from .creation import generate_axis_identifiers, to_dask -# REVIEW: getitem: `data.py`: import cf_asanyarray, cf_filled, cf_is_masked from .dask_utils import ( _da_ma_allclose, cf_asanyarray, @@ -375,7 +374,6 @@ def __init__( source=source, _use_array=_use_array and array is not None ) if _use_array: - # REVIEW: getitem: `__init__`: set 'asanyarray' try: array = source.to_dask_array(_asanyarray=False) except (AttributeError, TypeError): @@ -479,7 +477,6 @@ def __init__( is_dask = is_dask_collection(array) custom["deterministic"] = not is_dask - # REVIEW: getitem: `__init__`: Set whether or not to call `np.asanyarray` on chunks to convert them to numpy arrays. # Set whether or not to call `np.asanyarray` on chunks to # convert them to numpy arrays. if is_dask: @@ -513,7 +510,6 @@ def __init__( # Reset the units self._Units = units - # REVIEW: getitem: `__init__`: set 'asanyarray' # Store the dask array self._set_dask(dx, clear=_NONE, asanyarray=None) @@ -636,7 +632,6 @@ def __contains__(self, value): f"not {value!r}" ) - # REVIEW: getitem: `cf_contains`: set 'asanyarray' # If value is a scalar Data object then conform its units if isinstance(value, self.__class__): self_units = self.Units @@ -789,7 +784,6 @@ def __len__(self): TypeError: len() of unsized object """ - # REVIEW: getitem: `__len__`: set 'asanyarray' # The dask graph is never going to be computed, so we can set # '_asanyarray=False'. dx = self.to_dask_array(_asanyarray=False) @@ -898,7 +892,6 @@ def __getitem__(self, indices): # ------------------------------------------------------------ # Roll axes with cyclic slices # ------------------------------------------------------------ - # REVIEW: getitem: `__getitem__`: set 'asanyarray' if roll: # For example, if slice(-2, 3) has been requested on a # cyclic axis, then we roll that axis by two points and @@ -958,7 +951,6 @@ def __getitem__(self, indices): "Non-orthogonal indexing has not yet been implemented" ) - # REVIEW: getitem: `__getitem__`: set 'asanyarray=True' because subspaced chunks might not be in memory # ------------------------------------------------------------ # Set the subspaced dask array # @@ -1181,7 +1173,6 @@ def __setitem__(self, indices, value): return - # REVIEW: getitem: `__asanyarray__`: new property `__asanyarray__` @property def __asanyarray__(self): """Whether or not chunks need conversion to `numpy` arrays. @@ -1410,7 +1401,6 @@ def _clear_after_dask_update(self, clear=_ALL): # Set the CFA write status to False self._cfa_del_write() - # REVIEW: getitem: `_set_dask`: new keyword 'asanyarray' def _set_dask(self, dx, copy=False, clear=_ALL, asanyarray=False): """Set the dask array. @@ -1469,7 +1459,6 @@ def _set_dask(self, dx, copy=False, clear=_ALL, asanyarray=False): custom = self._custom custom["dask"] = dx - # REVIEW: getitem: `_set_dask`: set '__asanyarray__' if asanyarray is not None: custom["__asanyarray__"] = bool(asanyarray) @@ -2545,7 +2534,6 @@ def percentile( else: axes = tuple(sorted(d._parse_axes(axes))) - # REVIEW: getitem: `percentile`: set 'asanyarray' # 'cf_percentile' has its own call to 'cf_asanyarray', so we # can set '_asanyarray=False'. 
        dx = d.to_dask_array(_asanyarray=False)
@@ -3045,7 +3033,6 @@ def convolution_filter(
 
         dx = d.to_dask_array()
 
-        # REVIEW: getitem: `percentile`: rectify comment
         # Cast to float to ensure that NaNs can be stored (so
         # map_overlap can correctly assign the halos)
         if dx.dtype != float:
@@ -3233,7 +3220,6 @@ def rechunk(
 
        """
         d = _inplace_enabled_define_and_cleanup(self)
-        # REVIEW: getitem: `rechunk`: set 'asanyarray'
         dx = d.to_dask_array(_asanyarray=False)
         dx = dx.rechunk(chunks, threshold, block_size_limit, balance)
         # Dask rechunking is essentially a wrapper for __getitem__
@@ -3291,7 +3277,6 @@ def _asdatetime(self, inplace=False):
             )
 
         if not d._isdatetime():
-            # REVIEW: getitem: `_asdatetime`: set 'asanyarray'
             # 'cf_rt2dt' has its own call to 'cf_asanyarray', so we
             # can set '_asanyarray=False'.
             dx = d.to_dask_array(_asanyarray=False)
@@ -3349,7 +3334,6 @@ def _asreftime(self, inplace=False):
             )
 
         if d._isdatetime():
-            # REVIEW: getitem: `_asreftime`: set 'asanyarray'
             # 'cf_dt2rt' has its own call to 'cf_asanyarray', so we
             # can set '_asanyarray=False'.
             dx = d.to_dask_array(_asanyarray=False)
@@ -3962,7 +3946,6 @@ def _regrid(
                 f"the shape of the regrid operator: {operator.src_shape}"
             )
 
-        # REVIEW: getitem: `_regrid`: set 'asanyarray'
         # 'regrid' has its own calls to 'cf_asanyarray', so we can set
         # '_asanyarray=False'.
         dx = self.to_dask_array(_asanyarray=False)
@@ -4207,7 +4190,6 @@ def concatenate(
             processed_data.append(data1)
             copied = not copy  # to avoid making two copies in a given case
 
-        # REVIEW: getitem: `concatenate`: set 'asanyarray'
         # Get data as dask arrays and apply concatenation
         # operation. We can set '_asanyarray=False' because at compute
         # time the concatenation operation does not need to access the
@@ -4240,7 +4222,6 @@ def concatenate(
                 cfa = _NONE
                 break
 
-        # REVIEW: getitem: `concatenate`: define the asanyarray status
         # Define the __asanyarray__ status
         asanyarray = processed_data[0].__asanyarray__
         for d in processed_data[1:]:
@@ -4251,7 +4232,6 @@ def concatenate(
                 asanyarray = True
                 break
 
-        # REVIEW: getitem: `concatenate`: set 'asanyarray'
         # Set the new dask array
         data0._set_dask(dx, clear=_ALL ^ cfa, asanyarray=asanyarray)
 
@@ -4897,7 +4877,6 @@ def chunks(self):
     6
 
        """
-        # REVIEW: getitem: `chunks`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         return self.to_dask_array(_asanyarray=False).chunks
@@ -4955,7 +4934,6 @@ def Units(self, value):
 
         cf_func = partial(cf_units, from_units=old_units, to_units=value)
 
-        # REVIEW: getitem: `Units`: set 'asanyarray'
         # 'cf_units' has its own call to 'cf_asanyarray', so we can
         # set '_asanyarray=False'.
         dx = self.to_dask_array(_asanyarray=False)
@@ -5025,7 +5003,6 @@ def dtype(self):
     [1 2 3]
 
        """
-        # REVIEW: getitem: `dtype`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         dx = self.to_dask_array(_asanyarray=False)
@@ -5141,7 +5118,6 @@ def is_masked(self):
     True
 
        """
-        # REVIEW: getitem: `is_masked`: set 'asanyarray'
         # 'cf_is_masked' has its own call to 'cf_asanyarray', so we
         # can set '_asanyarray=False'.
         dx = self.to_dask_array(_asanyarray=False)
@@ -5187,7 +5163,6 @@ def nbytes(self):
     24
 
        """
-        # REVIEW: getitem: `nbytes`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         dx = self.to_dask_array(_asanyarray=False)
@@ -5224,7 +5199,6 @@ def ndim(self):
     0
 
        """
-        # REVIEW: getitem: `ndim`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
        dx = self.to_dask_array(_asanyarray=False)
@@ -5249,7 +5223,6 @@ def npartitions(self):
     6
 
        """
-        # REVIEW: getitem: `npartitions`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         return self.to_dask_array(_asanyarray=False).npartitions
@@ -5273,7 +5246,6 @@ def numblocks(self):
     6
 
        """
-        # REVIEW: getitem: `numblocks` set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         return self.to_dask_array(_asanyarray=False).numblocks
@@ -5306,7 +5278,6 @@ def shape(self):
     ()
 
        """
-        # REVIEW: getitem: `shape`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         dx = self.to_dask_array(_asanyarray=False)
@@ -5348,7 +5319,6 @@ def size(self):
     1
 
        """
-        # REVIEW: getitem: `size` set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         dx = self.to_dask_array(_asanyarray=False)
@@ -6547,7 +6517,6 @@ def convert_reference_time(
             )
             d.Units = units0
 
-        # REVIEW: getitem: `convert_reference_time`: set 'asanyarray'
         # 'cf_rt2dt' its own call to 'cf_asanyarray', so we can set
         # '_asanyarray=False'.
         dx = d.to_dask_array(_asanyarray=False)
@@ -6628,7 +6597,6 @@ def get_deterministic_name(self):
 
         units = self._Units
 
-        # REVIEW: getitem: `get_deterministic_name`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         return tokenize(
@@ -6698,7 +6666,6 @@ def get_filenames(self):
 
        """
         out = set()
 
-        # REVIEW: getitem: `get_filenames`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         for a in self.todict(_asanyarray=False).values():
@@ -6835,7 +6802,6 @@ def add_file_location(self, location):
 
         updated = False
 
-        # REVIEW: getitem: `add_file_location`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         dsk = self.todict(_asanyarray=False)
@@ -8279,7 +8245,6 @@ def unique(self, split_every=None):
         # in the result.
         d.soften_mask()
 
-        # REVIEW: getitem: `unique`: set 'asanyarray'
         # The applicable chunk function will have its own call to
         # 'cf_asanyarray', so we can set '_asanyarray=False'.
         dx = d.to_dask_array(_asanyarray=False)
@@ -9028,7 +8993,6 @@ def harden_mask(self):
     [1 -- 3]
 
        """
-        # REVIEW: getitem: `hardmask`: set 'asanyarray'
         # 'cf_harden_mask' has its own call to 'cf_asanyarray', so we
         # can set '_asanyarray=False'.
         dx = self.to_dask_array(_asanyarray=False)
@@ -9151,7 +9115,6 @@ def soften_mask(self):
     [ 1 999 3]
 
        """
-        # REVIEW: getitem: `soften_mask`: set 'asanyarray'
         # 'cf_soften_mask' has its own call to 'cf_asanyarray', so we
         # can set '_asanyarray=False'.
         dx = self.to_dask_array(_asanyarray=False)
@@ -9183,7 +9146,6 @@ def file_locations(self):
 
        """
         out = set()
 
-        # REVIEW: getitem: `file_locations`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         for key, a in self.todict(_asanyarray=False).items():
@@ -9244,7 +9206,6 @@ def filled(self, fill_value=None, inplace=False):
                 f"data type {d.dtype.str!r}"
             )
 
-        # REVIEW: getitem: `filled`: set 'asanyarray'
         # 'cf_filled' has its own call to 'cf_asanyarray', so we can
         # set '_asanyarray=False'.
        dx = d.to_dask_array(_asanyarray=False)
@@ -9876,7 +9837,6 @@ def override_calendar(self, calendar, inplace=False, i=False):
         d._Units = Units(d.Units._units, calendar)
         return d
 
-    # REVIEW: getitem: `to_dask_array`: new keyword 'asanyarray'
     def to_dask_array(self, apply_mask_hardness=False, _asanyarray=True):
         """Convert the data to a `dask` array.
 
@@ -10216,7 +10176,6 @@ def del_file_location(self, location):
 
         updated = False
 
-        # REVIEW: getitem: `del_file_location`: set 'asanyarray'
         # The dask graph is never going to be computed, so we can set
         # '_asanyarray=False'.
         dsk = self.todict(_asanyarray=False)
@@ -11401,7 +11360,6 @@ def where(
         # Missing values could be affected, so make sure that the mask
         # hardness has been applied.
         #
-        # REVIEW: getitem: `where`: set 'asanyarray'
         # 'cf_where' has its own calls to 'cf_asanyarray', so we can
         # set '_asanyarray=False'.
         dx = d.to_dask_array(apply_mask_hardness=True, _asanyarray=False)
@@ -11419,7 +11377,6 @@ def where(
             condition = type(self).asdata(condition)
             condition = where_broadcastable(d, condition, "condition")
 
-            # REVIEW: getitem: `where`: set 'asanyarray'
             # 'cf_where' has its own calls to 'cf_asanyarray', so we can
             # set '_asanyarray=False'.
             condition = condition.to_dask_array(_asanyarray=False)
@@ -11465,7 +11422,6 @@ def where(
 
         x, y = xy
 
-        # REVIEW: getitem: `where`: 'da.asanyarray' is no longer required
         # Apply the where operation
         dx = da.core.elemwise(cf_where, dx, condition, x, y, d.hardmask)
         d._set_dask(dx)
@@ -11692,7 +11648,6 @@ def cull_graph(self):
     ('array-21ea057f160746a3d3f0943bba945460', 0): array([1, 2, 3])}
 
        """
-        # REVIEW: getitem: `cull_graph`: set 'asanyarray'
         dx = self.to_dask_array(_asanyarray=False)
         dsk, _ = cull(dx.dask, dx.__dask_keys__())
         dx = da.Array(dsk, name=dx.name, chunks=dx.chunks, dtype=dx.dtype)
@@ -11972,7 +11927,6 @@ def tan(self, inplace=False, i=False):
 
         return d
 
-    # REVIEW: getitem: `todict`: new keywords 'apply_mask_hardness', 'asanyarray'
     def todict(
         self, optimize_graph=True, apply_mask_hardness=False, _asanyarray=True
     ):
diff --git a/cf/data/fragment/__init__.py b/cf/data/fragment/__init__.py
index fd82cfa9cf..b7315107d4 100644
--- a/cf/data/fragment/__init__.py
+++ b/cf/data/fragment/__init__.py
@@ -1,9 +1,5 @@
 from .fullfragmentarray import FullFragmentArray
-
-# REVIEW: h5: `__init__.py`: import `H5netcdfFragmentArray`
 from .h5netcdffragmentarray import H5netcdfFragmentArray
 from .netcdffragmentarray import NetCDFFragmentArray
-
-# REVIEW: h5: `__init__.py`: import `NetCDF4FragmentArray`
 from .netcdf4fragmentarray import NetCDF4FragmentArray
 from .umfragmentarray import UMFragmentArray
diff --git a/cf/data/fragment/fullfragmentarray.py b/cf/data/fragment/fullfragmentarray.py
index 652c7806cf..e2855b3003 100644
--- a/cf/data/fragment/fullfragmentarray.py
+++ b/cf/data/fragment/fullfragmentarray.py
@@ -9,7 +9,6 @@ class FullFragmentArray(FragmentArrayMixin, FullArray):
 
     """
 
-    # REVIEW: h5: `__init__`: replace units/calendar API with attributes
     def __init__(
         self,
         fill_value=None,
diff --git a/cf/data/fragment/h5netcdffragmentarray.py b/cf/data/fragment/h5netcdffragmentarray.py
index 6ae379c984..0b70976c7f 100644
--- a/cf/data/fragment/h5netcdffragmentarray.py
+++ b/cf/data/fragment/h5netcdffragmentarray.py
@@ -2,7 +2,6 @@
 from .mixin import FragmentArrayMixin
 
 
-# REVIEW: h5: `H5netcdfFragmentArray`: New class to access netCDF fragment with `h5netcdf`
 class H5netcdfFragmentArray(FragmentArrayMixin, H5netcdfArray):
     """A netCDF fragment array accessed with `h5netcdf`.
 
diff --git a/cf/data/fragment/mixin/fragmentarraymixin.py b/cf/data/fragment/mixin/fragmentarraymixin.py
index 85e844201e..f02c779002 100644
--- a/cf/data/fragment/mixin/fragmentarraymixin.py
+++ b/cf/data/fragment/mixin/fragmentarraymixin.py
@@ -12,7 +12,6 @@ class FragmentArrayMixin:
 
     """
 
-    # REVIEW: getitem: `_get_array`: new method to convert subspace to numpy array
     def _get_array(self, index=None):
         """Returns a subspace of the dataset variable.
 
@@ -166,7 +165,6 @@ def _size_1_axis(self, indices):
             None
 
         """
-        # REVIEW: getitem: `_size_1_axis`: refactor to use `original_shape`
         original_shape = self.original_shape
         if original_shape.count(1):
             return original_shape.index(1)
diff --git a/cf/data/fragment/netcdf4fragmentarray.py b/cf/data/fragment/netcdf4fragmentarray.py
index 869c083676..91f87dc2e8 100644
--- a/cf/data/fragment/netcdf4fragmentarray.py
+++ b/cf/data/fragment/netcdf4fragmentarray.py
@@ -2,7 +2,6 @@
 from .mixin import FragmentArrayMixin
 
 
-# REVIEW: h5: `NetCDF4FragmentArray`: New class to access netCDF fragment with `netCDF4`
 class NetCDF4FragmentArray(FragmentArrayMixin, NetCDF4Array):
     """A netCDF fragment array accessed with `netCDF4`.
 
diff --git a/cf/data/fragment/netcdffragmentarray.py b/cf/data/fragment/netcdffragmentarray.py
index c5365098c9..ee34501e94 100644
--- a/cf/data/fragment/netcdffragmentarray.py
+++ b/cf/data/fragment/netcdffragmentarray.py
@@ -7,7 +7,6 @@
 from .netcdf4fragmentarray import NetCDF4FragmentArray
 
 
-# REVIEW: getitem: `NetCDFFragmentArray`: new inheritance to allow for different netCDF backends
 class NetCDFFragmentArray(
     FragmentArrayMixin,
     IndexMixin,
@@ -24,7 +23,6 @@ class NetCDFFragmentArray(
 
     """
 
-    # REVIEW: h5: `__init__`: replace units/calendar API with attributes
     def __init__(
         self,
         filename=None,
@@ -176,7 +174,6 @@ def __init__(
         # By default, close the file after data array access
         self._set_component("close", True, copy=False)
 
-    # REVIEW: getitem: `_get_array`: new method to convert subspace to numpy array
     def _get_array(self, index=None):
         """Returns a subspace of the dataset variable.
 
diff --git a/cf/data/fragment/umfragmentarray.py b/cf/data/fragment/umfragmentarray.py
index b45bf44bf9..9168225945 100644
--- a/cf/data/fragment/umfragmentarray.py
+++ b/cf/data/fragment/umfragmentarray.py
@@ -9,8 +9,6 @@ class UMFragmentArray(FragmentArrayMixin, UMArray):
 
     """
 
-    # REVIEW: h5: `__init__`: replace units/calendar API with attributes
-    # REVIEW: h5: `__init__`: new keyword 'storage_options'
     def __init__(
         self,
         filename=None,
diff --git a/cf/data/utils.py b/cf/data/utils.py
index 2c34757c22..c7982fc857 100644
--- a/cf/data/utils.py
+++ b/cf/data/utils.py
@@ -869,8 +869,6 @@ def collapse(
         "keepdims": keepdims,
         "split_every": split_every,
         "mtol": mtol,
-        # REVIEW: active: `collapse`: pass the active storage status onto the collapse functions
-        # "active_storage": d.active_storage,
     }
 
     weights = parse_weights(d, weights, axis)
@@ -880,7 +878,6 @@ def collapse(
     if ddof is not None:
         kwargs["ddof"] = ddof
 
-    # REVIEW: getitem: `collapse`: set 'asanyarray'
     # The applicable chunk function will have its own call to
     # 'cf_asanyarray', so we can set '_asanyarray=False'. Also, setting
    # _asanyarray=False will ensure that any active storage operations
@@ -998,7 +995,6 @@ def parse_weights(d, weights, axis=None):
     w = []
     shape = d.shape
     axes = d._axes
-    # REVIEW: active: `parse_weights`: minor refactor
     Data = type(d)
     for key, value in weights.items():
         value = Data.asdata(value)
diff --git a/cf/field.py b/cf/field.py
index 4d735a11c7..d73e2b7b49 100644
--- a/cf/field.py
+++ b/cf/field.py
@@ -5220,7 +5220,6 @@ def histogram(self, digitized):
 
         """
         raise RuntimeError("Use cf.histogram instead.")
 
-    # REVIEW: active: active storage docstring
     @_deprecated_kwarg_check("i", version="3.0.0", removed_at="4.0.0")
     @_manage_log_level_via_verbosity
     def collapse(
@@ -7101,7 +7100,6 @@ def collapse(
                 "collapse"
             )
 
-            # REVIEW: active: `collapse`: include size 1 axes in collapse
             # Note: It is important that size 1 axes are also passed
             #       on to the Data collapse, because active storage
             #       collapses get confused if they're not there.
diff --git a/cf/functions.py b/cf/functions.py
index da04b18f58..22820bc3db 100644
--- a/cf/functions.py
+++ b/cf/functions.py
@@ -163,7 +163,6 @@ def _free_memory():
     return float(virtual_memory().available)
 
 
-# REVIEW: active: `configuration`: new keywords 'active_storage', 'active_storage_url', 'active_storage_max_requests'
 def configuration(
     atol=None,
     rtol=None,
@@ -425,7 +424,6 @@ def configuration(
     )
 
 
-# REVIEW: active: `_configuration`: new keywords 'active_storage', 'active_storage_url', 'active_storage_max_requests'
 def _configuration(_Configuration, **kwargs):
     """Internal helper function to provide the logic for `cf.configuration`.
 
@@ -584,7 +582,6 @@ class log_level(ConstantAccess, cfdm.log_level):
     _reset_log_emergence_level = _reset_log_emergence_level
 
 
-# REVIEW: active: `regrid_logging`: new examples
 class regrid_logging(ConstantAccess):
     """Whether or not to enable `esmpy` regridding logging.
 
@@ -716,7 +713,6 @@ def _parse(cls, arg):
         )  # pragma: no cover
 
 
-# REVIEW: active: `relaxed_identities`: new examples
 class relaxed_identities(ConstantAccess):
     """Use 'relaxed' mode when getting a construct identity.
 
@@ -850,7 +846,6 @@ def _parse(cls, arg):
         return parse_bytes(arg)
 
 
-# REVIEW: active: `tempdir`: new examples
 class tempdir(ConstantAccess):
     """The directory for internally generated temporary files.
 
@@ -1202,7 +1197,6 @@ def _parse(cls, arg):
         return arg
 
 
-# REVIEW: active: `active_storage`: new function
 class active_storage(ConstantAccess):
     """Whether or not to attempt active storage reductions.
 
@@ -1272,7 +1266,6 @@ def _parse(cls, arg):
         return bool(arg)
 
 
-# REVIEW: active: `active_storage_url`: new function
 class active_storage_url(ConstantAccess):
     """The URL location of the active storage reducer.
 
@@ -1337,7 +1330,6 @@ def _parse(cls, arg):
         return str(arg)
 
 
-# REVIEW: active: `active_storage_max_requests`: new function
 class active_storage_max_requests(ConstantAccess):
     """Concurrent active storage server requests per `dask` chunk.
@@ -2446,9 +2438,6 @@ def normalize_slice(index, size, cyclic=False):
     return slice(start, stop, step)
 
 
-# REVIEW: getitem: `get_subspace`: remove deprecated function
-
-
 _equals = cfdm.Data()._equals
 
 
@@ -2897,7 +2886,6 @@ def relpath(filename, start=None):
     'http://data/archive/file.nc'
 
     """
-    # REVIEW: h5: `relpath`: minor refactor
     u = urlparse(filename)
     if u.scheme != "":
         return filename
@@ -2936,7 +2924,6 @@ def dirname(filename):
     'http://data/archive'
 
     """
-    # REVIEW: h5: `relpath`: minor refactor
     u = urlparse(filename)
     if u.scheme != "":
         return filename.rpartition("/")[0]
@@ -2976,7 +2963,6 @@ def pathjoin(path1, path2):
     'http://data/archive/file.nc'
 
     """
-    # REVIEW: h5: `relpath`: minor refactor
     u = urlparse(path1)
     if u.scheme != "":
         return urljoin(path1, path2)
@@ -3347,7 +3333,6 @@ def _get_module_info(module, alternative_name=False, try_except=False):
     )
 
 
-# REVIEW: h5: `environment`: new dependencies
 def environment(display=True, paths=True):
     """Return the names and versions of the cf package and its dependencies.
 
diff --git a/cf/read_write/netcdf/netcdfread.py b/cf/read_write/netcdf/netcdfread.py
index dd0fb89c1e..883cc7b5a2 100644
--- a/cf/read_write/netcdf/netcdfread.py
+++ b/cf/read_write/netcdf/netcdfread.py
@@ -209,7 +209,6 @@ def _create_data(
             if data.npartitions == 1:
                 data._cfa_set_write(True)
 
-            # REVIEW: h5: `_create_data`: control caching
             if (
                 not compression_index
                 and self.read_vars.get("cache")
@@ -254,7 +253,6 @@ def _create_data(
                 coord_ncvar=coord_ncvar,
             )
 
-            # REVIEW: h5: `_create_data`: replace units/calendar API with attributes
             attributes = kwargs["attributes"]
             data = self._create_Data(
                 cfa_array,
@@ -263,7 +261,6 @@ def _create_data(
                 calendar=attributes.get("calendar"),
             )
 
-            # REVIEW: h5: `_create_data`: don't cache data from CFA variables
             # Note: We don't cache elements from CFA variables, because
             #       the data are in fragment files which have not been
             #       opened and may not not even be openable (such as could
@@ -624,7 +621,6 @@ def _cache_data_elements(self, data, ncvar):
         # Store the elements in the data object
         data._set_cached_elements(elements)
 
-    # REVIEW: h5: `_create_cfanetcdfarray`: docstring/comment improvements
     def _create_cfanetcdfarray(
         self,
         ncvar,
@@ -699,7 +695,6 @@ def _create_cfanetcdfarray(
         kwargs["x"] = aggregation_instructions
         kwargs["instructions"] = " ".join(sorted(instructions))
 
-        # REVIEW: h5: `_create_cfanetcdfarray`: choose the correct netCDF backend
         # Use the kwargs to create a CFANetCDFArray instance
         if g["original_netCDF4"]:
             array = self.implementation.initialise_CFANetCDF4Array(**kwargs)
@@ -753,7 +748,6 @@ def _create_cfanetcdfarray_term(
             return_kwargs_only=True,
         )
 
-        # REVIEW: h5: `_create_cfanetcdfarray_term`: fix unknown fragment shape
         # Get rid of the incorrect shape. This will end up getting set
         # correctly by the CFANetCDFArray instance.
        kwargs.pop("shape", None)
@@ -772,7 +766,6 @@ def _create_cfanetcdfarray_term(
         kwargs["x"] = aggregation_instructions
         kwargs["instructions"] = " ".join(sorted(instructions))
 
-        # REVIEW: h5: `_create_cfanetcdfarray_term`: choose the correct netCDF backend
         if g["original_netCDF4"]:
             array = self.implementation.initialise_CFANetCDF4Array(**kwargs)
         else:
@@ -962,7 +955,6 @@ def _cfa_parse_aggregated_data(self, ncvar, aggregated_data):
         aggregation_instructions = g["cfa_aggregation_instructions"]
         variable_attributes = g["variable_attributes"]
 
-        # REVIEW: h5: `_cfa_parse_aggregated_data`: use `cfdm.netcdf_indexer` to get data
         # Loop round aggregation instruction terms
         out = {}
         for x in self._parse_x(
diff --git a/cf/read_write/netcdf/netcdfwrite.py b/cf/read_write/netcdf/netcdfwrite.py
index 256c4b1392..e2c8da0236 100644
--- a/cf/read_write/netcdf/netcdfwrite.py
+++ b/cf/read_write/netcdf/netcdfwrite.py
@@ -579,8 +579,6 @@ def _create_cfa_data(self, ncvar, ncdimensions, data, cfvar):
             },
         )
 
-    # REVIEW: h5: Deleted function _convert_to_builtin_type was a CFA-0.4 thing
-
     def _check_valid(self, array, cfvar=None, attributes=None):
         """Checks for array values outside of the valid range.
 
@@ -749,7 +747,6 @@ def _cfa_write_non_standard_terms(
             # more than one unique value then the fragment's value is
             # missing data.
             #
-            # REVIEW: getitem: `_cfa_write_non_standard_terms`: set '_asanyarray'
             # '_cfa_unique' has its own call to 'cf_asanyarray', so
             # we can set '_asanyarray=False'.
             dx = data.to_dask_array(_asanyarray=False)
@@ -810,7 +807,6 @@ def _cfa_unique(cls, a):
             data if there is not a unique value.
 
         """
-        # REVIEW: getitem: `_cfa_unique`: convert a to a usable array
        a = cf_asanyarray(a)
 
         out_shape = (1,) * a.ndim
@@ -966,7 +962,6 @@ def _cfa_aggregation_instructions(self, data, cfvar):
         # Create the location array
         # ------------------------------------------------------------
         dtype = np.dtype(np.int32)
-        # REVIEW: getitem: `_cfa_aggregation_instructions`: set 'asanyarray'
         if (
             max(data.to_dask_array(_asanyarray=False).chunksize)
             > np.iinfo(dtype).max
diff --git a/cf/read_write/read.py b/cf/read_write/read.py
index 3e1f2ec6ae..c6ad881db9 100644
--- a/cf/read_write/read.py
+++ b/cf/read_write/read.py
@@ -58,17 +58,13 @@ def read(
     select_options=None,
     follow_symlinks=False,
     mask=True,
-    # REVIEW: h5: `read`: new 'unpack' parameter to control auto-unpacking (previously always True)
     unpack=True,
     warn_valid=False,
     chunks="auto",
     domain=False,
     cfa=None,
-    # REVIEW: h5: `read`: new 'netcdf_backend' parameter to control how to read files
     netcdf_backend=None,
-    # REVIEW: h5: `read`: new 'storage_options' parameter to control access to S3
     storage_options=None,
-    # REVIEW: h5: `read`: 'cache' parameter to control whether or not to get to cache selected data elements
     cache=True,
 ):
     """Read field or domain constructs from files.
diff --git a/cf/read_write/um/umread.py b/cf/read_write/um/umread.py
index 9efbb5644e..e73166eba1 100644
--- a/cf/read_write/um/umread.py
+++ b/cf/read_write/um/umread.py
@@ -1973,7 +1973,6 @@ def create_data(self):
         recs = self.recs
         um_Units = self.um_Units
 
-        # REVIEW: h5: `create_data`: replace units/calendar API with attributes
         attributes = {
             "units": getattr(um_Units, "units", None),
             "calendar": getattr(um_Units, "calendar", None),
diff --git a/cf/read_write/write.py b/cf/read_write/write.py
index a2b7ed114b..23a8dda3cd 100644
--- a/cf/read_write/write.py
+++ b/cf/read_write/write.py
@@ -13,7 +13,6 @@
 netcdf = NetCDFWrite(implementation())
 
 
-# REVIEW: h5: `write`: docstring improvements
 @_manage_log_level_via_verbosity
 def write(
     fields,
diff --git a/cf/regrid/regrid.py b/cf/regrid/regrid.py
index fd39bed21f..a06d1cc960 100644
--- a/cf/regrid/regrid.py
+++ b/cf/regrid/regrid.py
@@ -2466,7 +2466,6 @@ def create_esmpy_weights(
 
     from .. import __version__
 
-    # REVIEW: h5: new name and location of file lock
     from ..data.array.locks import netcdf_lock
 
     if (
diff --git a/cf/regrid/regridoperator.py b/cf/regrid/regridoperator.py
index 3272aaf835..10a77bc641 100644
--- a/cf/regrid/regridoperator.py
+++ b/cf/regrid/regridoperator.py
@@ -727,7 +727,6 @@ def tosparse(self):
             # Read the weights from the weights file
             from netCDF4 import Dataset
 
-            # REVIEW: h5: new name and location of file lock
             from ..data.array.locks import netcdf_lock
 
             netcdf_lock.acquire()