diff --git a/rten-examples/src/detr.rs b/rten-examples/src/detr.rs index df927d65..2e2670bd 100644 --- a/rten-examples/src/detr.rs +++ b/rten-examples/src/detr.rs @@ -83,7 +83,7 @@ Options: Ok(args) } -/// Resolve the min/max size to use as inputs for [rescaled_size] based on +/// Resolve the min/max size to use as inputs for [`rescaled_size`] based on /// the min/max CLI args and defaults for the model configuration. fn resolve_min_max_size( min: Option, diff --git a/rten-imageproc/src/contours.rs b/rten-imageproc/src/contours.rs index 578ec010..c3c3e609 100644 --- a/rten-imageproc/src/contours.rs +++ b/rten-imageproc/src/contours.rs @@ -64,7 +64,7 @@ fn find_nonzero_neighbor< None } -/// Specifies which contours to extract from a mask in [find_contours]. +/// Specifies which contours to extract from a mask in [`find_contours`]. pub enum RetrievalMode { /// Get only the outer-most contours. External, diff --git a/rten-imageproc/src/drawing.rs b/rten-imageproc/src/drawing.rs index 47e34b3a..69e794b1 100644 --- a/rten-imageproc/src/drawing.rs +++ b/rten-imageproc/src/drawing.rs @@ -214,7 +214,7 @@ pub fn draw_polygon( } } -/// Tracks data about an edge in a polygon being traversed by [FillIter]. +/// Tracks data about an edge in a polygon being traversed by [`FillIter`]. #[derive(Clone, Copy, Debug)] struct Edge { /// Y coordinate where this edge starts @@ -244,7 +244,7 @@ struct Edge { } /// Iterator over coordinates of pixels that fill a polygon. See -/// [Polygon::fill_iter] for notes on how this iterator determines which +/// [`Polygon::fill_iter`] for notes on how this iterator determines which /// pixels are inside the polygon. /// /// The implementation follows . @@ -434,7 +434,7 @@ impl<'a, T: Copy + Default> Painter<'a, T> { self.saved_states.push(self.state); } - /// Pop and apply a drawing style from the stack created with [Painter::save]. + /// Pop and apply a drawing style from the stack created with [`Painter::save`]. pub fn restore(&mut self) { if let Some(state) = self.saved_states.pop() { self.state = state; @@ -445,7 +445,7 @@ impl<'a, T: Copy + Default> Painter<'a, T> { /// style. /// /// This avoids the need to manually save and restore state with - /// [Painter::save] and [Painter::restore]. + /// [`Painter::save`] and [`Painter::restore`]. pub fn with_save(&mut self, f: F) { self.save(); f(self); diff --git a/rten-imageproc/src/poly_algos.rs b/rten-imageproc/src/poly_algos.rs index 94ac8a66..64cf3e11 100644 --- a/rten-imageproc/src/poly_algos.rs +++ b/rten-imageproc/src/poly_algos.rs @@ -136,7 +136,7 @@ pub fn simplify_polyline(points: &[PointF], epsilon: f32) -> Vec { /// Return a simplified version of the polygon defined by `points`. /// -/// This is very similar to [simplify_polyline] except that the input is +/// This is very similar to [`simplify_polyline`] except that the input is /// treated as a polygon where the last point implicitly connects to the first /// point to close the shape. pub fn simplify_polygon(points: &[PointF], epsilon: f32) -> Vec { diff --git a/rten-imageproc/src/shapes.rs b/rten-imageproc/src/shapes.rs index 3b45d0a9..b9bed405 100644 --- a/rten-imageproc/src/shapes.rs +++ b/rten-imageproc/src/shapes.rs @@ -982,10 +982,10 @@ impl> Polygon { /// Return true if the pixel with coordinates `p` lies inside the polygon. 
/// - /// The intent of this function is to align with [Polygon::fill_iter] such + /// The intent of this function is to align with [`Polygon::fill_iter`] such /// that `polygon.contains_pixel(p)` is equivalent to /// `polygon.fill_iter().any(|fp| fp == p)` but faster because it doesn't - /// iterate over every pixel inside the polygon. See [Polygon::fill_iter] + /// iterate over every pixel inside the polygon. See [`Polygon::fill_iter`] /// for notes on how the inside/outside status of a pixel is determined. pub fn contains_pixel(&self, p: Point) -> bool { let mut edge_crossings = 0; @@ -1126,7 +1126,7 @@ impl Default for Polygons { } } -/// Iterator over polygons in a [Polygons] collection. +/// Iterator over polygons in a [`Polygons`] collection. pub struct PolygonsIter<'a> { points: &'a [Point], polygons: Iter<'a, Range>, diff --git a/rten-simd/src/dispatch.rs b/rten-simd/src/dispatch.rs index 2ec4219f..b105f6b7 100644 --- a/rten-simd/src/dispatch.rs +++ b/rten-simd/src/dispatch.rs @@ -76,7 +76,7 @@ impl SimdDispatcher { /// Trait for SIMD operations which can be evaluated using different SIMD /// vector types. /// -/// To dispatch the operation, create a [SimdDispatcher] and call +/// To dispatch the operation, create a [`SimdDispatcher`] and call /// [`dispatch(op)`](SimdDispatcher::dispatch). pub trait SimdOp { /// Evaluate the operator using a given SIMD vector type. @@ -99,7 +99,7 @@ pub trait SimdUnaryOp { unsafe fn eval(&self, x: S) -> S; } -/// Apply a vectorized unary function to elements of `input` using [simd_map]. +/// Apply a vectorized unary function to elements of `input` using [`simd_map`]. pub fn dispatch_map_op(input: &[f32], out: &mut [MaybeUninit], op: Op) { let wrapped_op = SimdMapOp::wrap(input.into(), out.into(), op, 0. /* pad */); let dispatcher = SimdDispatcher::default(); @@ -115,7 +115,7 @@ pub fn dispatch_map_op_in_place(input: &mut [f32], op: Op) { } /// SIMD operation which applies a unary operator `Op` to all elements in -/// an input buffer using [simd_map]. +/// an input buffer using [`simd_map`]. pub struct SimdMapOp { input: PtrLen, output: MutPtrLen>, diff --git a/rten-tensor/src/copy.rs b/rten-tensor/src/copy.rs index 813e4bb2..cb3ab2e0 100644 --- a/rten-tensor/src/copy.rs +++ b/rten-tensor/src/copy.rs @@ -9,7 +9,7 @@ use crate::{ Matrix, MatrixLayout, MatrixMut, NdTensorView, NdTensorViewMut, TensorView, TensorViewMut, }; -/// Iterator returned by [range_chunks]. +/// Iterator returned by [`range_chunks`]. pub struct RangeChunks { remainder: Range, chunk_size: usize, @@ -91,7 +91,7 @@ impl std::iter::FusedIterator for RangeChunksExact {} /// Return an iterator over sub-ranges of `range`. If `range.len()` is not a /// multiple of `chunk_size` then there will be a remainder after iteration -/// completes, available via [RangeChunksExact::remainder]. +/// completes, available via [`RangeChunksExact::remainder`]. pub fn range_chunks_exact(range: Range, chunk_size: usize) -> RangeChunksExact { RangeChunksExact { remainder: range, diff --git a/rten-tensor/src/index_iterator.rs b/rten-tensor/src/index_iterator.rs index 1cbe8511..38a23552 100644 --- a/rten-tensor/src/index_iterator.rs +++ b/rten-tensor/src/index_iterator.rs @@ -11,7 +11,7 @@ impl IndexArray for [usize; N] {} pub type DynIndex = SmallVec<[usize; 5]>; /// Iterator over a range of N-dimensional indices, where N may be known at -/// compile time (see [NdIndices]) or only at runtime ([DynIndices]). +/// compile time (see [`NdIndices`]) or only at runtime ([`DynIndices`]).
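As a concrete illustration of the `Polygon::contains_pixel` contract documented in the shapes.rs hunk above, here is a minimal sketch. Only `contains_pixel` and `fill_iter` are taken directly from the docs being edited here; the `Polygon::new` constructor and `Point::from_yx` helper are assumptions based on the rest of rten-imageproc's API.

```rust
use rten_imageproc::{Point, Polygon};

fn main() {
    // A 4x4 axis-aligned square.
    let poly = Polygon::new(vec![
        Point::from_yx(0, 0),
        Point::from_yx(0, 4),
        Point::from_yx(4, 4),
        Point::from_yx(4, 0),
    ]);
    let p = Point::from_yx(2, 2);

    // Per the doc comment, the fast point-in-polygon test agrees with
    // scanning every filled pixel, without doing the full iteration.
    assert_eq!(poly.contains_pixel(p), poly.fill_iter().any(|fp| fp == p));
}
```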
/// /// The number of dimensions may be zero, in which case the iterator will yield /// a single empty index. This is consistent with eg. `ndindex` in NumPy. @@ -175,7 +175,7 @@ impl Iterator for NdIndices { impl ExactSizeIterator for NdIndices {} impl FusedIterator for NdIndices {} -/// Max tensor rank supported by the variant of [DynIndices] that is optimized +/// Max tensor rank supported by the variant of [`DynIndices`] that is optimized /// for small-rank tensors. const DYN_SMALL_LEN: usize = 4; diff --git a/rten-tensor/src/iterators.rs b/rten-tensor/src/iterators.rs index b61de792..080b7bcb 100644 --- a/rten-tensor/src/iterators.rs +++ b/rten-tensor/src/iterators.rs @@ -15,7 +15,7 @@ use super::{ }; /// Borrowed reference to a tensor's data and layout. This differs from -/// [TensorView] in that it borrows the layout rather than having its own. +/// [`TensorView`] in that it borrows the layout rather than having its own. /// /// `'d` is the lifetime of the data and `'l` the lifetime of the layout. pub(crate) struct ViewRef<'d, 'l, T, L: Layout> { @@ -46,7 +46,7 @@ impl<'d, 'l, T, L: Layout> Clone for ViewRef<'d, 'l, T, L> { } /// Mutably borrowed reference to a tensor's data and layout. This differs from -/// [TensorViewMut] in that it borrows the layout rather than having its own. +/// [`TensorViewMut`] in that it borrows the layout rather than having its own. pub(crate) struct MutViewRef<'d, 'l, T, L: Layout> { data: ViewMutData<'d, T>, layout: &'l L, @@ -603,9 +603,9 @@ impl<'a, T> ExactSizeIterator for Lanes<'a, T> {} impl<'a, T> FusedIterator for Lanes<'a, T> {} -/// Mutable version of [Lanes]. +/// Mutable version of [`Lanes`]. /// -/// Unlike [Lanes], this does not implement [Iterator] due to complications +/// Unlike [`Lanes`], this does not implement [`Iterator`] due to complications /// in implementing this for an iterator that returns mutable references, but /// it has a similar interface. pub struct LanesMut<'a, T> { @@ -894,7 +894,7 @@ impl<'a, T, L: MutLayout> Iterator for InnerIterDynMut<'a, T, L> { impl<'a, T, L: MutLayout> ExactSizeIterator for InnerIterDynMut<'a, T, L> {} -/// Iterator over slices of a tensor along an axis. See [TensorView::axis_iter]. +/// Iterator over slices of a tensor along an axis. See [`TensorView::axis_iter`]. pub struct AxisIter<'a, T, L: MutLayout + RemoveDim> { view: TensorBase, L>, axis: usize, @@ -926,7 +926,7 @@ impl<'a, T, L: MutLayout + RemoveDim> Iterator for AxisIter<'a, T, L> { } } -/// Iterator over mutable slices of a tensor along an axis. See [TensorViewMut::axis_iter_mut]. +/// Iterator over mutable slices of a tensor along an axis. See [`TensorViewMut::axis_iter_mut`]. pub struct AxisIterMut<'a, T, L: MutLayout + RemoveDim> { view: TensorBase, L>, axis: usize, @@ -975,7 +975,7 @@ impl<'a, T, L: MutLayout + RemoveDim> Iterator for AxisIterMut<'a, T, L> { } } -/// Iterator over slices of a tensor along an axis. See [TensorView::axis_chunks]. +/// Iterator over slices of a tensor along an axis. See [`TensorView::axis_chunks`]. pub struct AxisChunks<'a, T, L: MutLayout> { remainder: Option, L>>, axis: usize, @@ -1017,7 +1017,7 @@ impl<'a, T, L: MutLayout> Iterator for AxisChunks<'a, T, L> { } } -/// Iterator over mutable slices of a tensor along an axis. See [TensorViewMut::axis_chunks_mut]. +/// Iterator over mutable slices of a tensor along an axis. See [`TensorViewMut::axis_chunks_mut`]. 
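To show how the axis iterators documented in the iterators.rs hunks above fit together, a short sketch. It assumes `NdTensor`'s nested-array `From` conversion and the `axis_iter`/`axis_chunks` signatures implied by the docs:

```rust
use rten_tensor::prelude::*;
use rten_tensor::NdTensor;

fn main() {
    let t = NdTensor::from([[1, 2, 3], [4, 5, 6]]);

    // `axis_iter(0)` yields one view per row, each with one dimension
    // fewer than `t`.
    for (i, row) in t.view().axis_iter(0).enumerate() {
        println!("row {}: {:?}", i, row.to_vec());
    }

    // `axis_chunks(1, 2)` yields views covering up to two columns at a time.
    assert_eq!(t.view().axis_chunks(1, 2).count(), 2);
}
```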
pub struct AxisChunksMut<'a, T, L: MutLayout> { remainder: Option, L>>, axis: usize, diff --git a/rten-tensor/src/layout.rs b/rten-tensor/src/layout.rs index ff112221..f3be4b82 100644 --- a/rten-tensor/src/layout.rs +++ b/rten-tensor/src/layout.rs @@ -20,8 +20,8 @@ pub fn is_valid_permutation(ndim: usize, permutation: &[usize]) -> bool { /// size of each, and the mapping between indices and offsets in the data /// storage. /// -/// The main implementations are [NdLayout], where the dimension count is known -/// statically, and [DynLayout], where the dimension count is only known at +/// The main implementations are [`NdLayout`], where the dimension count is known +/// statically, and [`DynLayout`], where the dimension count is only known at /// runtime. pub trait Layout { /// Type used to represent indices. @@ -866,7 +866,7 @@ impl From> for DynLayout { } } -/// MutLayout extends [Layout] with methods for creating, modifying and +/// MutLayout extends [`Layout`] with methods for creating, modifying and /// transforming layouts. pub trait MutLayout: Layout + Clone { /// Create a new contiguous layout with a given shape. @@ -1212,10 +1212,10 @@ impl<'a> IntoLayout for &'a [usize] { } } -/// Trait which extends [MutLayout] with support for changing the number of +/// Trait which extends [`MutLayout`] with support for changing the number of /// dimensions in-place. /// -/// This is only implemented for [DynLayout], since layouts that have a static +/// This is only implemented for [`DynLayout`], since layouts that have a static /// rank cannot change their dimension count at runtime. pub trait ResizeLayout: MutLayout { /// Insert a size-one axis at the given index in the shape. This will have diff --git a/rten-tensor/src/lib.rs b/rten-tensor/src/lib.rs index 168a85a6..2039a733 100644 --- a/rten-tensor/src/lib.rs +++ b/rten-tensor/src/lib.rs @@ -60,7 +60,7 @@ pub trait RandomSource { /// Storage allocation trait. /// -/// This is used by various methods on [TensorBase] with an `_in` suffix, +/// This is used by various methods on [`TensorBase`] with an `_in` suffix, /// which allow the caller to control the allocation of the data buffer for /// the returned owned tensor. pub trait Alloc { @@ -76,7 +76,7 @@ impl Alloc for &A { } } -/// Implementation of [Alloc] which wraps the global allocator. +/// Implementation of [`Alloc`] which wraps the global allocator. pub struct GlobalAlloc {} impl GlobalAlloc { diff --git a/rten-tensor/src/slice_range.rs b/rten-tensor/src/slice_range.rs index 1dee6c93..5e1090e7 100644 --- a/rten-tensor/src/slice_range.rs +++ b/rten-tensor/src/slice_range.rs @@ -88,7 +88,7 @@ where /// - `[SliceItem]` slices /// /// Ranges can be specified using regular Rust ranges (eg. `start..end`, -/// `start..`, `..end`, `..`) or a [SliceRange], which extends regular Rust +/// `start..`, `..end`, `..`) or a [`SliceRange`], which extends regular Rust /// ranges with support for steps and specifying endpoints using negative /// values, which behaves similarly to using negative values in NumPy. pub trait IntoSliceItems { @@ -157,22 +157,22 @@ impl, T2: Into, T3: Into, T4: Into; -/// Convert a slice of indices into [SliceItem]s. +/// Convert a slice of indices into [`SliceItem`]s. /// -/// To convert indices of a statically known length to [SliceItem]s, use -/// [IntoSliceItems] instead. This function is for the case when the length +/// To convert indices of a statically known length to [`SliceItem`]s, use +/// [`IntoSliceItems`] instead.
This function is for the case when the length /// is not statically known, but is assumed to likely be small. pub fn to_slice_items>(index: &[T]) -> DynSliceItems { index.iter().map(|x| x.clone().into()).collect() } -/// A range for slicing a [Tensor](crate::Tensor) or [NdTensor](crate::NdTensor). +/// A range for slicing a [`Tensor`](crate::Tensor) or [`NdTensor`](crate::NdTensor). /// -/// This has two main differences from [Range]. +/// This has two main differences from [`Range`]. /// /// - A non-zero step between indices can be specified. The step can be negative, /// which means that the dimension should be traversed in reverse order. @@ -468,7 +468,7 @@ impl IntoIterator for IndexRange { } } -/// An iterator over the indices in an [IndexRange]. +/// An iterator over the indices in an [`IndexRange`]. #[derive(Clone, Debug, PartialEq)] pub struct IndexRangeIter { /// Next index. This is in the range [-1, N] where `N` is the size of diff --git a/rten-tensor/src/storage.rs b/rten-tensor/src/storage.rs index 0eb224c3..40b7f020 100644 --- a/rten-tensor/src/storage.rs +++ b/rten-tensor/src/storage.rs @@ -4,7 +4,7 @@ use std::ops::Range; /// Trait for backing storage used by tensors and views. /// -/// Mutable tensors have storage which also implement [StorageMut]. +/// Mutable tensors have storage which also implements [`StorageMut`]. /// /// This specifies a contiguous array of elements in memory, as a pointer and a /// length. The storage may be owned or borrowed. For borrowed storage, there @@ -166,7 +166,7 @@ fn assert_storage_range_valid(storage: &S, range: Range *mut Self::Elem; - /// Mutable version of [Storage::get]. + /// Mutable version of [`Storage::get`]. /// /// # Safety /// @@ -191,7 +191,7 @@ pub unsafe trait StorageMut: Storage { } } - /// Mutable version of [Storage::get_unchecked]. + /// Mutable version of [`Storage::get_unchecked`]. /// /// # Safety /// @@ -276,11 +276,11 @@ impl<'a, T> Clone for ViewData<'a, T> { impl<'a, T> Copy for ViewData<'a, T> {} impl<'a, T> ViewData<'a, T> { - /// Variant of [Storage::get] which preserves lifetimes. + /// Variant of [`Storage::get`] which preserves lifetimes. /// /// # Safety /// - /// See [Storage::get]. + /// See [`Storage::get`]. pub unsafe fn get(&self, offset: usize) -> Option<&'a T> { if offset < self.len { Some(unsafe { &*self.ptr.add(offset) }) @@ -289,17 +289,17 @@ impl<'a, T> ViewData<'a, T> { } } - /// Variant of [Storage::get_unchecked] which preserves lifetimes. + /// Variant of [`Storage::get_unchecked`] which preserves lifetimes. /// /// # Safety /// - /// See [Storage::get_unchecked]. + /// See [`Storage::get_unchecked`]. pub unsafe fn get_unchecked(&self, offset: usize) -> &'a T { debug_assert!(offset < self.len); &*self.ptr.add(offset) } - /// Variant of [Storage::slice] which preserves lifetimes. + /// Variant of [`Storage::slice`] which preserves lifetimes. pub fn slice(&self, range: Range) -> ViewData<'a, T> { assert_storage_range_valid(self, range.clone()); ViewData { @@ -311,7 +311,7 @@ impl<'a, T> ViewData<'a, T> { } } - /// Variant of [Storage::view] which preserves lifetimes. + /// Variant of [`Storage::view`] which preserves lifetimes.
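A sketch of the `to_slice_items` use case described in the slice_range.rs hunk above, where the number of dimensions to index is only known at runtime. This assumes the function is re-exported from the rten-tensor crate root, as the neighbouring slicing types are:

```rust
use rten_tensor::to_slice_items;

fn main() {
    // An index path whose length is decided at runtime.
    let index: Vec<usize> = vec![1, 0, 3];
    let items = to_slice_items(&index);
    assert_eq!(items.len(), 3);
    // `items` can now be passed to slicing methods that accept `&[SliceItem]`.
}
```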
pub fn view(&self) -> ViewData<'a, T> { self.slice(0..self.len()) } @@ -356,12 +356,12 @@ pub struct ViewMutData<'a, T> { unsafe impl<'a, T> Send for ViewMutData<'a, T> {} impl<'a, T> ViewMutData<'a, T> { - /// Variant of [StorageMut::as_slice_mut] which preserves the underlying + /// Variant of [`StorageMut::as_slice_mut`] which preserves the underlying /// lifetime in the result. /// /// # Safety /// - /// See [StorageMut::as_slice_mut]. + /// See [`StorageMut::as_slice_mut`]. pub unsafe fn to_slice_mut(mut self) -> &'a mut [T] { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) } @@ -414,12 +414,12 @@ unsafe impl<'a, T> StorageMut for ViewMutData<'a, T> { /// Tensor storage which may be either owned or borrowed. /// -/// The name is taken from [std::borrow::Cow] in the standard library, +/// The name is taken from [`std::borrow::Cow`] in the standard library, /// which is conceptually similar. pub enum CowData<'a, T> { - /// A [CowData] that owns its data. + /// A [`CowData`] that owns its data. Owned(Vec), - /// A [CowData] that borrows data. + /// A [`CowData`] that borrows data. Borrowed(ViewData<'a, T>), } diff --git a/rten-tensor/src/tensor.rs b/rten-tensor/src/tensor.rs index c7f67931..cc65c133 100644 --- a/rten-tensor/src/tensor.rs +++ b/rten-tensor/src/tensor.rs @@ -28,7 +28,7 @@ use crate::{Alloc, GlobalAlloc, IntoSliceItems, RandomSource, SliceItem}; /// The storage can be owned (like a `Vec`), borrowed (like `&[T]`) or /// mutably borrowed (like `&mut [T]`). The layout can have a dimension count /// that is determined statically (ie. forms part of the tensor's type), see -/// [NdLayout] or is only known at runtime, see [DynLayout]. +/// [`NdLayout`] or is only known at runtime, see [`DynLayout`]. pub struct TensorBase { data: S, @@ -44,7 +44,7 @@ pub struct TensorBase { layout: L, } -/// Trait implemented by all variants of [TensorBase], which provides a +/// Trait implemented by all variants of [`TensorBase`], which provides a /// `view` method to get an immutable view of the tensor, plus methods which /// forward to such a view. /// @@ -55,7 +55,7 @@ pub struct TensorBase { /// `tensor.slice(...).transpose()`) without needing to separate each step /// into separate statements. /// -/// This trait is conceptually similar to the way [std::ops::Deref] in the Rust +/// This trait is conceptually similar to the way [`std::ops::Deref`] in the Rust /// standard library allows a `Vec` to have all the methods of an `&[T]`. /// /// If stable Rust gains support for specialization or a `Deref` trait that can @@ -75,7 +75,7 @@ pub trait AsView: Layout { /// Return the layout of this tensor. fn layout(&self) -> &Self::Layout; - /// Return a view of this tensor using a borrowed [CowData] for storage. + /// Return a view of this tensor using a borrowed [`CowData`] for storage. /// /// Together with [`into_cow`](TensorBase::into_cow), this is useful where /// code needs to conditionally copy or create a new tensor, and get either @@ -451,7 +451,7 @@ pub trait AsView: Layout { } /// Return a view which performs "weak" checking when indexing via - /// `view[]`. See [WeaklyCheckedView] for an explanation. + /// `view[]`. See [`WeaklyCheckedView`] for an explanation. fn weakly_checked_view(&self) -> WeaklyCheckedView, Self::Layout> { self.view().weakly_checked_view() } @@ -502,8 +502,8 @@ impl TensorBase { /// strides. /// /// This will fail if the data length is incorrect for the shape and stride - /// combination, or if the strides lead to overlap (see [OverlapPolicy]). 
- /// See also [TensorBase::from_slice_with_strides] which is a similar method + /// combination, or if the strides lead to overlap (see [`OverlapPolicy`]). + /// See also [`TensorBase::from_slice_with_strides`] which is a similar method /// for immutable views that does allow overlapping strides. pub fn from_data_with_strides>( shape: L::Index<'_>, @@ -732,7 +732,7 @@ impl TensorBase { /// Permute the order of dimensions according to the given order. /// - /// See [AsView::permuted]. + /// See [`AsView::permuted`]. pub fn permuted_mut(&mut self, order: L::Index<'_>) -> TensorBase, L> { TensorBase { layout: self.layout.permuted(order), @@ -742,7 +742,7 @@ impl TensorBase { /// Change the layout of the tensor without moving any data. /// - /// See [AsView::reshaped]. + /// See [`AsView::reshaped`]. pub fn reshaped_mut( &mut self, shape: SH, @@ -814,7 +814,7 @@ impl TensorBase { } /// Return a mutable view that performs only "weak" checking when indexing, - /// this is faster but can hide bugs. See [WeaklyCheckedView]. + /// this is faster but can hide bugs. See [`WeaklyCheckedView`]. pub fn weakly_checked_view_mut(&mut self) -> WeaklyCheckedView, L> { WeaklyCheckedView { base: self.view_mut(), @@ -942,10 +942,10 @@ impl TensorBase, L> { has_capacity.then_some(new_layout) } - /// Convert the storage of this tensor into an owned [CowData]. + /// Convert the storage of this tensor into an owned [`CowData`]. /// /// This is useful in contexts where code needs to conditionally copy or - /// create a new tensor. See [AsView::as_cow]. + /// create a new tensor. See [`AsView::as_cow`]. pub fn into_cow(self) -> TensorBase, L> { let TensorBase { data, layout } = self; TensorBase { @@ -956,7 +956,7 @@ impl TensorBase, L> { /// Consume self and return the underlying data as a contiguous tensor. /// - /// See also [TensorBase::to_vec]. + /// See also [`TensorBase::to_vec`]. pub fn into_data(self) -> Vec where T: Clone, @@ -995,7 +995,7 @@ impl TensorBase, L> { /// /// Each call to `f` will receive an element index and should return the /// corresponding value. If the function does not need this index, use - /// [from_simple_fn](TensorBase::from_simple_fn) instead, as it is faster. + /// [`from_simple_fn`](TensorBase::from_simple_fn) instead, as it is faster. pub fn from_fn) -> T, Idx>( shape: L::Index<'_>, mut f: F, @@ -1104,7 +1104,7 @@ impl TensorBase, L> { /// Return a new tensor containing uninitialized elements. /// /// The caller must initialize elements and then call - /// [assume_init](TensorBase::assume_init) to convert to an initialized + /// [`assume_init`](TensorBase::assume_init) to convert to an initialized /// `Tensor`. pub fn uninit(shape: L::Index<'_>) -> TensorBase>, L> where @@ -1216,7 +1216,7 @@ where /// Convert a tensor of potentially uninitialized elements to one of /// initialized elements. /// - /// See also [MaybeUninit::assume_init]. + /// See also [`MaybeUninit::assume_init`]. /// /// # Safety /// @@ -1276,7 +1276,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { /// Return a view of this tensor with a dynamic dimension count. /// - /// See [AsView::as_dyn]. + /// See [`AsView::as_dyn`]. pub fn as_dyn(&self) -> TensorBase, DynLayout> { TensorBase { data: self.data, @@ -1284,9 +1284,9 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { } } - /// Convert the storage of this view to a borrowed [CowData]. + /// Convert the storage of this view to a borrowed [`CowData`]. /// - /// See [AsView::as_cow]. + /// See [`AsView::as_cow`]. 
pub fn as_cow(&self) -> TensorBase, L> { TensorBase { layout: self.layout.clone(), @@ -1296,7 +1296,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { /// Broadcast this view to another shape. /// - /// See [AsView::broadcast]. + /// See [`AsView::broadcast`]. pub fn broadcast(&self, shape: S) -> TensorBase, S::Layout> where L: BroadcastLayout, @@ -1332,9 +1332,9 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { /// Create a new view with a given shape and data slice, and custom strides. /// - /// If you do not need to specify custom strides, use [TensorBase::from_data] - /// instead. This method is similar to [TensorBase::from_data_with_strides], - /// but allows strides that lead to internal overlap (see [OverlapPolicy]). + /// If you do not need to specify custom strides, use [`TensorBase::from_data`] + /// instead. This method is similar to [`TensorBase::from_data_with_strides`], + /// but allows strides that lead to internal overlap (see [`OverlapPolicy`]). pub fn from_slice_with_strides( shape: L::Index<'_>, data: &'a [T], @@ -1383,14 +1383,14 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { /// Return an iterator over the inner `N` dimensions of this tensor. /// - /// See [AsView::inner_iter]. + /// See [`AsView::inner_iter`]. pub fn inner_iter(&self) -> InnerIter<'a, T, N> { InnerIter::new(self.view()) } /// Return an iterator over the inner `n` dimensions of this tensor. /// - /// See [AsView::inner_iter_dyn]. + /// See [`AsView::inner_iter_dyn`]. pub fn inner_iter_dyn(&self, n: usize) -> InnerIterDyn<'a, T, L> { InnerIterDyn::new(self.view(), n) } @@ -1409,14 +1409,14 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { /// Return an iterator over elements of this tensor in their logical order. /// - /// See [AsView::iter]. + /// See [`AsView::iter`]. pub fn iter(&self) -> Iter<'a, T> { Iter::new(self.view_ref()) } /// Return an iterator over 1D slices of this tensor along a given dimension. /// - /// See [AsView::lanes]. + /// See [`AsView::lanes`]. pub fn lanes(&self, dim: usize) -> Lanes<'a, T> { Lanes::new(self.view_ref(), dim) } @@ -1434,7 +1434,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { /// Permute the axes of this tensor according to `order`. /// - /// See [AsView::permuted]. + /// See [`AsView::permuted`]. pub fn permuted(&self, order: L::Index<'_>) -> TensorBase, L> { TensorBase { data: self.data, @@ -1444,7 +1444,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { /// Change the shape of this tensor without copying data. /// - /// See [AsView::reshaped]. + /// See [`AsView::reshaped`]. pub fn reshaped(&self, shape: S) -> TensorBase, S::Layout> { TensorBase { data: self.data, @@ -1485,7 +1485,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { /// Remove all size-one dimensions from this tensor. /// - /// See [AsView::squeezed]. + /// See [`AsView::squeezed`]. pub fn squeezed(&self) -> TensorView<'a, T> { TensorBase { data: self.data.view(), @@ -1562,7 +1562,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { /// Return the underlying data as a flat slice if the tensor is contiguous, /// or a copy of the data as a flat slice otherwise. /// - /// See [AsView::to_slice]. + /// See [`AsView::to_slice`]. pub fn to_slice(&self) -> Cow<'a, [T]> where T: Clone, @@ -1572,7 +1572,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { .unwrap_or_else(|| Cow::Owned(self.to_vec())) } - /// Reverse the order of dimensions in this tensor. See [AsView::transposed]. + /// Reverse the order of dimensions in this tensor. See [`AsView::transposed`]. 
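The tensor.rs hunks above revolve around view-returning methods (`permuted`, `reshaped`, `squeezed`, `transposed`, ...). A small sketch of how they compose, assuming the nested-array `From` conversion for `NdTensor`:

```rust
use rten_tensor::prelude::*;
use rten_tensor::NdTensor;

fn main() {
    let t = NdTensor::from([[1, 2], [3, 4], [5, 6]]); // shape [3, 2]

    // `transposed` reverses the dimension order without copying data...
    let tt = t.transposed();
    assert_eq!(tt.shape(), [2, 3]);

    // ...so elements are visited in the transposed logical order.
    assert_eq!(tt.to_vec(), [1, 3, 5, 2, 4, 6]);
}
```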
pub fn transposed(&self) -> TensorBase, L> { TensorBase { data: self.data, @@ -1591,7 +1591,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase, L> { }) } - /// Return a read-only view of this tensor. See [AsView::view]. + /// Return a read-only view of this tensor. See [`AsView::view`]. pub fn view(&self) -> TensorBase, L> { TensorBase { data: self.data, @@ -1989,7 +1989,7 @@ impl, const N: usize> TensorBase> { /// Store an array of `M` elements into successive entries of a tensor along /// the `dim` axis. /// - /// See [TensorBase::get_array] for more details. + /// See [`TensorBase::get_array`] for more details. #[inline] pub fn set_array(&mut self, base: [usize; N], dim: usize, values: [T; M]) where diff --git a/rten-tensor/src/test_util.rs b/rten-tensor/src/test_util.rs index aa1bd0b5..8874f09f 100644 --- a/rten-tensor/src/test_util.rs +++ b/rten-tensor/src/test_util.rs @@ -133,7 +133,7 @@ where /// Check that the shapes of two tensors are equal and that their contents /// are approximately equal. /// -/// This is like [expect_equal] but allows a custom absolute tolerance value. +/// This is like [`expect_equal`] but allows a custom absolute tolerance value. pub fn expect_equal_with_tolerance( x: &V, y: &V, diff --git a/rten-text/src/normalizer.rs b/rten-text/src/normalizer.rs index 1f0d2c9b..8e4b55b9 100644 --- a/rten-text/src/normalizer.rs +++ b/rten-text/src/normalizer.rs @@ -73,10 +73,10 @@ pub struct Normalizer { strip_accents: bool, } -/// Configuration for a [Normalizer]. +/// Configuration for a [`Normalizer`]. #[derive(Clone, Debug, Default)] pub struct NormalizerOptions { - /// If true, convert all text to lowercase using [char::to_lowercase]. + /// If true, convert all text to lowercase using [`char::to_lowercase`]. pub lowercase: bool, /// Whether to strip accents when tokenizing. An "accent" is defined as diff --git a/rten-text/src/split.rs b/rten-text/src/split.rs index b33506ca..a971e347 100644 --- a/rten-text/src/split.rs +++ b/rten-text/src/split.rs @@ -57,7 +57,7 @@ impl SliceExt for [T] { } } -/// Iterator returned by [SplitExt::split_keep_delimeters]. +/// Iterator returned by [`SplitExt::split_keep_delimeters`]. pub struct SplitKeepDelim<'a, P: FnMut(char) -> bool> { remainder: &'a str, predicate: P, diff --git a/rten-text/src/tokenizers.rs b/rten-text/src/tokenizers.rs index e2dbadf2..3140585d 100644 --- a/rten-text/src/tokenizers.rs +++ b/rten-text/src/tokenizers.rs @@ -25,7 +25,7 @@ mod wordpiece; pub use bpe::{patterns, Bpe, BpeError}; pub use wordpiece::{WordPiece, WordPieceOptions}; -/// Input sequences for [Tokenizer::encode]. +/// Input sequences for [`Tokenizer::encode`]. #[derive(Copy, Clone, Debug, PartialEq)] pub enum EncoderInput<'a> { /// Encoder input with a single sequence. @@ -53,10 +53,10 @@ impl<'a> From<(&'a str, &'a str)> for EncoderInput<'a> { /// Integer type used to represent token IDs. pub type TokenId = u32; -/// Output produced by a [Tokenizer::encode] implementation. +/// Output produced by a [`Tokenizer::encode`] implementation. /// -/// Use [Encoded::token_ids] to get the token IDs to feed to a model, and -/// [Encoded::text_for_token_range] to map token ID ranges back to the +/// Use [`Encoded::token_ids`] to get the token IDs to feed to a model, and +/// [`Encoded::text_for_token_range`] to map token ID ranges back to the /// corresponding input text. 
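Tying together the `Encoded` pieces just described: a hedged sketch of single-sequence encoding. The `token_ids` accessor and the `from_json` loader are named in this diff; the exact `encode` signature and the `From<&str>` conversion for `EncoderInput` are assumptions:

```rust
use rten_text::tokenizers::{EncodeOptions, Tokenizer};

fn encode_text(
    tokenizer: &Tokenizer,
    text: &str,
) -> Result<Vec<u32>, Box<dyn std::error::Error>> {
    // Single-sequence input; a pair can be passed as `(text_a, text_b)`.
    let encoded = tokenizer.encode(text.into(), EncodeOptions::default())?;
    // `token_ids` gives the IDs to feed to a model; `text_for_token_range`
    // maps ranges of those IDs back to the input text.
    Ok(encoded.token_ids().to_vec())
}
```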
#[derive(Debug)] pub struct Encoded<'a> { @@ -140,8 +140,8 @@ impl<'a> Encoded<'a> { } } -/// Options that control chunking and truncation by [Tokenizer::encode] and -/// [Tokenizer::encode_chunks]. +/// Options that control chunking and truncation by [`Tokenizer::encode`] and +/// [`Tokenizer::encode_chunks`]. #[derive(Clone, Default)] pub struct EncodeOptions { /// Maximum number of tokens in each chunk, including any special tokens @@ -156,7 +156,7 @@ pub struct EncodeOptions { /// using a pre-computed model. /// /// Encoders are not generally used directly but instead via a wrapping -/// [Tokenizer]. +/// [`Tokenizer`]. pub trait Encoder { /// Look up the numeric ID for a token given its canonical string /// representation. This is used eg. for looking up the IDs of special @@ -222,7 +222,7 @@ pub trait Encoder { fn decode(&self, ids: &[TokenId]) -> Result; } -/// Errors returned by [Tokenizer::from_json]. +/// Errors returned by [`Tokenizer::from_json`]. #[derive(Debug)] pub enum FromJsonError { /// There was an error loading a BPE tokenizer. @@ -248,7 +248,7 @@ impl Error for FromJsonError {} /// Tokenizes text inputs into sequences of token IDs that can be fed to a /// machine learning model. /// -/// `Tokenizer` wraps an [Encoder] which handles specific methods of encoding of +/// `Tokenizer` wraps an [`Encoder`] which handles specific methods of encoding of /// individual sequences (eg. WordPiece, Byte Pair Encoding, Unigram) and adds /// common functionality such as injecting special tokens, splitting sequences /// into overlapping chunks and truncating long sequences. @@ -262,7 +262,7 @@ pub struct Tokenizer { sep_token: Option, } -/// Configuration for a [Tokenizer]. +/// Configuration for a [`Tokenizer`]. #[derive(Clone, Default)] pub struct TokenizerOptions<'a> { /// Token added at the start of the output. For BERT models, this is the @@ -420,7 +420,7 @@ impl Tokenizer { /// Encode one or two sequences into a sequence of tokens. /// /// The output is split into chunks such that the number of tokens in - /// each chunk is less than the limit specified in [EncodeOptions]. + /// each chunk is less than the limit specified in [`EncodeOptions`]. pub fn encode_chunks<'a>( &self, input: EncoderInput<'a>, @@ -599,7 +599,7 @@ pub enum TokenizerError { /// There was an error parsing a byte sequence as a UTF-8 string. /// - /// This can arise when working with tokenizers like [Bpe] where + /// This can arise when working with tokenizers like [`Bpe`] where /// individual tokens do not always represent whole characters. InvalidUtf8, } diff --git a/rten-text/src/tokenizers/bpe.rs b/rten-text/src/tokenizers/bpe.rs index d98b55ab..d7e3ba41 100644 --- a/rten-text/src/tokenizers/bpe.rs +++ b/rten-text/src/tokenizers/bpe.rs @@ -7,7 +7,7 @@ use fancy_regex::Regex; use crate::tokenizers::{Encoder, TokenId, TokenizerError}; -/// Errors that can occur when building a [Bpe] tokenizer or encoding or +/// Errors that can occur when building a [`Bpe`] tokenizer or encoding or /// decoding text using it. #[derive(Debug)] pub enum BpeError { @@ -49,7 +49,7 @@ type Rank = u32; /// a character. type EncodedByteSlice<'a> = &'a str; -/// Like [EncodedByteSlice], but owned. +/// Like [`EncodedByteSlice`], but owned. type EncodedBytes = String; /// Return true if `c` is considered a printable character. @@ -138,7 +138,7 @@ fn bpe_merge(tokens: &mut Vec, ranks: &HashMap<(Rank, Rank), Rank>) -> usi } struct BpeBuilder { - /// See [ByteLevelBpe::merges]. + /// See [`ByteLevelBpe::merges`]. 
ranks: HashMap<(Rank, Rank), Rank>, /// Mapping between encoded tokens and their rank in the BPE merge list. In diff --git a/rten-text/src/tokenizers/wordpiece.rs b/rten-text/src/tokenizers/wordpiece.rs index e452707e..95e7a559 100644 --- a/rten-text/src/tokenizers/wordpiece.rs +++ b/rten-text/src/tokenizers/wordpiece.rs @@ -25,7 +25,7 @@ pub struct WordPiece { max_word_len: usize, } -/// Configuration for a [WordPiece] tokenizer. +/// Configuration for a [`WordPiece`] tokenizer. #[derive(Debug, Default, Clone)] pub struct WordPieceOptions { /// The normalizer that handles Unicode normalization, lower-casing the diff --git a/rten-vecmath/src/erf.rs b/rten-vecmath/src/erf.rs index 8ea3ce58..4e4b7aa4 100644 --- a/rten-vecmath/src/erf.rs +++ b/rten-vecmath/src/erf.rs @@ -67,14 +67,14 @@ impl SimdUnaryOp for SimdErf { /// Vectorized error function. /// -/// This is a vectorized version of [erf] that computes the function for each +/// This is a vectorized version of [`erf`] that computes the function for each /// element in `xs` and writes the result to `out`. `xs` and `out` must be equal /// in length. pub fn vec_erf(xs: &[f32], out: &mut [MaybeUninit]) { dispatch_map_op(xs, out, SimdErf {}); } -/// Variant of [vec_erf] that modifies elements in-place. +/// Variant of [`vec_erf`] that modifies elements in-place. pub fn vec_erf_in_place(xs: &mut [f32]) { dispatch_map_op_in_place(xs, SimdErf {}); } @@ -106,7 +106,7 @@ pub fn vec_gelu(xs: &[f32], out: &mut [MaybeUninit]) { dispatch_map_op(xs, out, SimdGelu {}); } -/// Variant of [vec_gelu] that modifies elements in-place. +/// Variant of [`vec_gelu`] that modifies elements in-place. pub fn vec_gelu_in_place(xs: &mut [f32]) { dispatch_map_op_in_place(xs, SimdGelu {}); } diff --git a/rten-vecmath/src/exp.rs b/rten-vecmath/src/exp.rs index 6c2e4716..cc82e2cc 100644 --- a/rten-vecmath/src/exp.rs +++ b/rten-vecmath/src/exp.rs @@ -26,10 +26,10 @@ const EXP_POLY_4: f32 = 4.16695364e-2; // ~ 1/4! or 1/24 const EXP_POLY_5: f32 = 8.37312452e-3; // ~ 1/5! or 1/120 const EXP_POLY_6: f32 = 1.37805939e-3; // ~ 1/6! or 1/720 -/// Computes e^val. Functionally equivalent to [f32::exp]. +/// Computes e^val. Functionally equivalent to [`f32::exp`]. /// -/// This is scalar variant of [vec_exp] that uses exactly the same algorithm. -/// It has no performance or correctness advantage over [f32::exp] on most systems. +/// This is a scalar variant of [`vec_exp`] that uses exactly the same algorithm. +/// It has no performance or correctness advantage over [`f32::exp`] on most systems. pub fn exp(val: f32) -> f32 { // Safety: f32 is available on all systems. unsafe { simd_exp(val) } @@ -150,7 +150,7 @@ unsafe fn simd_sigmoid(x: S) -> S { /// Computes the [sigmoid function][sigmoid], aka. the standard logistic function, `1. / /// (1. + (-x).exp())`. /// -/// This is a scalar variant of [vec_sigmoid] that uses the same algorithm. +/// This is a scalar variant of [`vec_sigmoid`] that uses the same algorithm. /// /// [sigmoid]: https://en.wikipedia.org/wiki/Logistic_function#Mathematical_properties pub fn sigmoid(x: f32) -> f32 { @@ -168,7 +168,7 @@ impl SimdUnaryOp for SimdSigmoid { /// Vectorized sigmoid function. /// -/// This is a vectorized version of [sigmoid] that computes the function for +/// This is a vectorized version of [`sigmoid`] that computes the function for /// each element in `xs` and writes the result to `out`. `xs` and `out` must be /// equal in length.
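The in-place variants above are the simplest entry points into rten-vecmath. A minimal usage sketch, grounded in the `vec_*_in_place` signatures shown in this diff (assuming they are re-exported from the crate root):

```rust
use rten_vecmath::{vec_exp_in_place, vec_sigmoid_in_place};

fn main() {
    let mut xs = [-1.0f32, 0.0, 1.0];
    vec_sigmoid_in_place(&mut xs);
    assert!((xs[1] - 0.5).abs() < 1e-4); // sigmoid(0) == 0.5

    let mut ys = [0.0f32, 1.0];
    vec_exp_in_place(&mut ys);
    assert!((ys[0] - 1.0).abs() < 1e-4); // exp(0) == 1
}
```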
/// @@ -177,7 +177,7 @@ pub fn vec_sigmoid(xs: &[f32], out: &mut [MaybeUninit]) { dispatch_map_op(xs, out, SimdSigmoid {}); } -/// Variant of [vec_sigmoid] that modifies elements in-place. +/// Variant of [`vec_sigmoid`] that modifies elements in-place. pub fn vec_sigmoid_in_place(xs: &mut [f32]) { dispatch_map_op_in_place(xs, SimdSigmoid {}); } @@ -228,7 +228,7 @@ impl SimdUnaryOp for SimdExp { /// Vectorized exponential function. /// -/// This is a vectorized version of [exp] that computes the function for each +/// This is a vectorized version of [`exp`] that computes the function for each /// element in `xs` and writes the result to `out`. `xs` and `out` must be equal /// in length. /// @@ -237,7 +237,7 @@ pub fn vec_exp(xs: &[f32], out: &mut [MaybeUninit]) { dispatch_map_op(xs, out, SimdExp {}); } -/// Variant of [vec_exp] that modifies elements in-place. +/// Variant of [`vec_exp`] that modifies elements in-place. pub fn vec_exp_in_place(xs: &mut [f32]) { dispatch_map_op_in_place(xs, SimdExp {}); } diff --git a/rten-vecmath/src/testing.rs b/rten-vecmath/src/testing.rs index 5ef3259e..2910d214 100644 --- a/rten-vecmath/src/testing.rs +++ b/rten-vecmath/src/testing.rs @@ -102,7 +102,7 @@ impl Iterator for Progress { } } -/// Iterator over an arithmetic range. See [arange]. +/// Iterator over an arithmetic range. See [`arange`]. pub struct ARange> { next: T, end: T, diff --git a/src/constant_storage.rs b/src/constant_storage.rs index a5c997f0..1785fb50 100644 --- a/src/constant_storage.rs +++ b/src/constant_storage.rs @@ -19,9 +19,9 @@ fn slice_address_range(slice: &[T]) -> Range { /// usually be a model data/weights file (eg. in FlatBuffers format) read in /// or memory-mapped from disk. /// -/// This can be used as the storage for tensor views by creating [ArcSlice] +/// This can be used as the storage for tensor views by creating [`ArcSlice`] /// instances which reference a region of this buffer, and then using that -/// slice as the storage for an [ArcTensorView]. +/// slice as the storage for an [`ArcTensorView`]. #[derive(Debug)] pub enum ConstantStorage { /// Storage that references a memory-mapped file. @@ -125,7 +125,7 @@ unsafe impl Storage for ArcSlice { } } -/// Tensor view whose data is a slice of a buffer owned by a [ConstantStorage]. +/// Tensor view whose data is a slice of a buffer owned by a [`ConstantStorage`]. pub type ArcTensorView = TensorBase, DynLayout>; #[cfg(test)] diff --git a/src/ctc.rs b/src/ctc.rs index 2c331c1c..acfbe6ce 100644 --- a/src/ctc.rs +++ b/src/ctc.rs @@ -15,8 +15,8 @@ use crate::Operators; /// output class labels. The label 0 is reserved for the CTC blank label. /// /// Different decoding methods are available. Greedy decoding with -/// [CtcDecoder::decode_greedy] is very fast, but considers only the most likely -/// label at each time step. Beam searches with [CtcDecoder::decode_beam] +/// [`CtcDecoder::decode_greedy`] is very fast, but considers only the most likely +/// label at each time step. Beam searches with [`CtcDecoder::decode_beam`] /// consider the N most probable paths through the matrix. This may produce more /// accurate results, but is significantly slower. /// @@ -25,7 +25,7 @@ use crate::Operators; /// [^2]: pub struct CtcDecoder {} -/// Item in an output sequence produced by [CtcDecoder]. +/// Item in an output sequence produced by [`CtcDecoder`]. #[derive(Clone, Copy, Debug)] pub struct DecodeStep { /// Class label. 
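A sketch of greedy CTC decoding as described above. The `[sequence, n_labels]` log-probability layout and the blank-label convention come from these docs; the `CtcDecoder::new` constructor and the `Debug` output of the returned hypothesis are assumptions:

```rust
use rten::ctc::CtcDecoder;
use rten_tensor::NdTensor;

fn main() {
    // Log-probabilities for 3 time steps over 3 labels, where label 0 is
    // the CTC blank. Each step puts nearly all mass on one label.
    let probs = NdTensor::from([
        [-0.1f32, -5.0, -5.0], // blank
        [-5.0, -0.1, -5.0],    // label 1
        [-5.0, -5.0, -0.1],    // label 2
    ]);

    let decoder = CtcDecoder::new();
    let hypothesis = decoder.decode_greedy(probs.view());
    // The blank is dropped, leaving labels [1, 2].
    println!("{:?}", hypothesis);
}
```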
@@ -40,7 +40,7 @@ pub struct DecodeStep { pub pos: u32, } -/// A search state for beam decoding by [CtcDecoder]. This consists of a +/// A search state for beam decoding by [`CtcDecoder`]. This consists of a /// decoded sequence and associated probabilities. #[derive(Debug)] struct BeamState { @@ -82,7 +82,7 @@ fn log_sum_exp(log_probs: [f32; N]) -> f32 { } } -/// Result of decoding a sequence using [CtcDecoder]. +/// Result of decoding a sequence using [`CtcDecoder`]. /// /// This consists of a sequence of class labels and a score. #[derive(Clone, Debug)] @@ -144,7 +144,7 @@ impl CtcDecoder { /// /// This method chooses the label with the highest probability at each /// time step. This method is very fast, but may return less accurate - /// results than [CtcDecoder::decode_beam]. + /// results than [`CtcDecoder::decode_beam`]. /// /// `prob_seq` is a `[sequence, n_labels]` matrix of log probabilities of /// labels at each time step, where the label value 0 is reserved for the @@ -179,7 +179,7 @@ impl CtcDecoder { /// Decode sequence using a beam search and return the N best hypotheses. /// - /// See also [CtcDecoder::decode_beam]. + /// See also [`CtcDecoder::decode_beam`]. pub fn decode_beam_nbest( &self, prob_seq: NdTensorView, diff --git a/src/gemm.rs b/src/gemm.rs index 0c7e32db..4b8260e3 100644 --- a/src/gemm.rs +++ b/src/gemm.rs @@ -99,7 +99,7 @@ pub enum GemmInputA<'a, T> { /// A standard unpacked matrix. Unpacked(Matrix<'a, T>), - /// A matrix which has been pre-packed by [GemmExecutor::prepack_a]. + /// A matrix which has been pre-packed by [`GemmExecutor::prepack_a`]. Packed(&'a PackedAMatrix), // TODO - Support virtual "A" inputs, like `GemmInputB::Virtual`. } @@ -140,7 +140,7 @@ impl GemmOutT for f32 {} /// A virtual matrix which has a known size, but may not actually be /// materialized in memory. The GEMM implementation will call -/// [VirtualMatrix::pack_b] to pack blocks of this matrix into a buffer as it +/// [`VirtualMatrix::pack_b`] to pack blocks of this matrix into a buffer as it /// needs them. /// /// This is useful for operations such as im2col-based convolution, which @@ -186,11 +186,11 @@ pub enum GemmInputB<'a, T> { /// A standard unpacked matrix. Unpacked(Matrix<'a, T>), - /// A matrix which has been pre-packed by [GemmExecutor::prepack_b]. + /// A matrix which has been pre-packed by [`GemmExecutor::prepack_b`]. Packed(&'a PackedBMatrix), /// A virtual matrix, blocks of which will be materialized on-demand - /// during GEMM execution. See [VirtualMatrix]. + /// during GEMM execution. See [`VirtualMatrix`]. Virtual(&'a dyn VirtualMatrix), } @@ -257,7 +257,7 @@ pub struct GemmExecutor GemmExecutor usize { self.kernel.nr() } @@ -435,7 +435,7 @@ impl GemmExecutor GemmExecutor { - /// Create a [GemmExecutor] using the preferred kernel for the current system. + /// Create a [`GemmExecutor`] using the preferred kernel for the current system. pub fn new() -> GemmExecutor { #[cfg(feature = "avx512")] #[cfg(target_arch = "x86_64")] @@ -533,7 +533,7 @@ impl GemmExecutor { Self::with_generic_kernel() } - /// Create a [GemmExecutor] using the given kernel. Returns `None` if the + /// Create a [`GemmExecutor`] using the given kernel. Returns `None` if the /// kernel is not supported. #[allow(dead_code)] // Currently only used in tests pub fn with_kernel(hint: KernelType) -> Option { @@ -613,10 +613,10 @@ struct OutputTile { /// Stride between rows of this tile. Note the column stride is always 1. row_stride: usize, - /// Number of rows in this tile. 
Will be <= the [Kernel]'s `MR` constant. +    /// Number of rows in this tile. Will be <= the [`Kernel`]'s `MR` constant. used_rows: usize, -    /// Number of columns in this tile. Will be <= the [Kernel]'s `NR` constant. +    /// Number of columns in this tile. Will be <= the [`Kernel`]'s `NR` constant. used_cols: usize, } diff --git a/src/gemm/kernels/simd_generic.rs b/src/gemm/kernels/simd_generic.rs index 92546334..67af6121 100644 --- a/src/gemm/kernels/simd_generic.rs +++ b/src/gemm/kernels/simd_generic.rs @@ -6,7 +6,7 @@ use crate::iter_util::{range_chunks_exact, unroll_loop}; /// Compute an output block of a vector-matrix product ("gemv" in BLAS APIs). /// /// Multiple output columns are computed at a time, using `NR_REGS` SIMD -/// registers of type `S`. See [Kernel::gemv_kernel]. +/// registers of type `S`. See [`Kernel::gemv_kernel`]. /// /// Safety: The `SimdFloat` type must be supported on the current system. #[inline(always)] pub unsafe fn simd_gemv( } } -/// Variant of [simd_gemv] which handles the case where `b` has unit row stride. +/// Variant of [`simd_gemv`] which handles the case where `b` has unit row stride. #[inline(always)] unsafe fn simd_gemv_transposed( out: &mut [f32], } } -/// Variant of [simd_gemv] which handles the case where `b` has non-unit strides +/// Variant of [`simd_gemv`] which handles the case where `b` has non-unit strides /// for rows and columns. /// /// This doesn't benefit from SIMD operations. It is at least inlined so it @@ -186,7 +186,7 @@ fn simd_gemv_fallback(out: &mut [f32], a: &[f32], b: Matrix, alpha: f32, beta: f /// and `NR_REGS` specifies the number of columns in the tile as a multiple of /// the SIMD register width. /// -/// See [Kernel::kernel]. +/// See [`Kernel::kernel`]. /// /// Safety: The `SimdFloat` type must be supported on the current system. #[inline(always)] diff --git a/src/graph.rs b/src/graph.rs index dfe5ae82..4780bfcf 100644 --- a/src/graph.rs +++ b/src/graph.rs @@ -94,7 +94,7 @@ impl ValueNode { } } -/// Data for a constant node (ie. model weights) in a [Graph]. +/// Data for a constant node (ie. model weights) in a [`Graph`]. pub enum ConstantNodeData { Owned(Tensor), Arc(ArcTensorView), @@ -571,7 +571,7 @@ impl<'a> CaptureEnv<'a> { } /// Options that control logging and other behaviors when executing a -/// [Model](crate::Model). +/// [`Model`](crate::Model). #[derive(Clone, Debug, Default, PartialEq)] pub struct RunOptions { /// Whether to log times spent in different operators when run completes. diff --git a/src/iter_util.rs b/src/iter_util.rs index 3e7f22bc..f0a561b9 100644 --- a/src/iter_util.rs +++ b/src/iter_util.rs @@ -2,7 +2,7 @@ use std::ops::Range; use rayon::prelude::*; -/// Iterator returned by [range_chunks]. +/// Iterator returned by [`range_chunks`]. pub struct RangeChunks { remainder: Range, chunk_size: usize, @@ -82,7 +82,7 @@ impl std::iter::FusedIterator for RangeChunksExact {} /// Return an iterator over sub-ranges of `range`. If `range.len()` is not a /// multiple of `chunk_size` then there will be a remainder after iteration -/// completes, available via [RangeChunksExact::remainder]. +/// completes, available via [`RangeChunksExact::remainder`].
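Since `range_chunks_exact` here is crate-internal, the following is an in-crate illustration of the contract described above rather than an importable example; the exact return type of the `remainder` accessor is assumed:

```rust
#[test]
fn range_chunks_exact_leaves_remainder() {
    let mut chunks = range_chunks_exact(0..10, 4);
    assert_eq!(chunks.next(), Some(0..4));
    assert_eq!(chunks.next(), Some(4..8));
    assert_eq!(chunks.next(), None); // 8..10 is not a full chunk
    assert_eq!(chunks.remainder(), 8..10);
}
```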
#[allow(dead_code)] pub fn range_chunks_exact(range: Range, chunk_size: usize) -> RangeChunksExact { RangeChunksExact { @@ -92,7 +92,7 @@ pub fn range_chunks_exact(range: Range, chunk_size: usize) -> RangeChunks } /// Wrapper around either a serial or parallel iterator, returned by -/// [MaybeParIter::maybe_par_iter]. +/// [`MaybeParIter::maybe_par_iter`]. pub enum MaybeParallel> { Serial(SI), Parallel(PI), diff --git a/src/model.rs b/src/model.rs index 9cca23cb..764e8543 100644 --- a/src/model.rs +++ b/src/model.rs @@ -92,7 +92,7 @@ use crate::timing::TimingSort; /// loop. If such models have inputs which are constant in each iteration of the /// loop, execution can be sped up by using partial evaluation. This involves /// evaluating the part of the graph that depends only on the constant inputs -/// once, outside the loop. To do this use [Model::partial_run]. +/// once, outside the loop. To do this use [`Model::partial_run`]. /// /// ## Custom operator registries /// @@ -601,7 +601,7 @@ impl Model { /// Find a node in the model's graph given its string name. /// - /// This is a convenience method which is like [Model::find_node] but + /// This is a convenience method which is like [`Model::find_node`] but /// returns an error that includes the node's name if the node is not found. pub fn node_id(&self, id: &str) -> Result { self.find_node(id) @@ -681,7 +681,7 @@ impl Model { /// Run a model with a single input and output. /// - /// This is a simplified version of [Model::run] for the common case of + /// This is a simplified version of [`Model::run`] for the common case of /// executing a model with a single input and output. pub fn run_one( &self, @@ -719,7 +719,7 @@ impl Model { } } -/// Errors reported by [Model::load]. +/// Errors reported by [`Model::load`]. #[derive(Debug)] pub enum ModelLoadError { /// The FlatBuffers data describing the model is not supported by this diff --git a/src/model_builder.rs b/src/model_builder.rs index fd0d98f0..3193b1f6 100644 --- a/src/model_builder.rs +++ b/src/model_builder.rs @@ -200,7 +200,7 @@ enum NodeData<'a> { Operator(WIPOffset>), } -/// Arguments for [ModelBuilder::add_metadata]. +/// Arguments for [`ModelBuilder::add_metadata`]. pub struct MetadataArgs { pub onnx_hash: Option, } diff --git a/src/ops/binary_elementwise.rs b/src/ops/binary_elementwise.rs index 6ac4062e..ef18e013 100644 --- a/src/ops/binary_elementwise.rs +++ b/src/ops/binary_elementwise.rs @@ -135,7 +135,7 @@ pub fn fast_broadcast_cycles_repeats( /// Check if a tensor of shape `from_shape` can be broadcast to `to_shape` /// just by cycling the whole sequence. If so, returns the number of cycles. /// -/// This is a more restricted variant of [fast_broadcast_cycles_repeats]. +/// This is a more restricted variant of [`fast_broadcast_cycles_repeats`]. fn fast_broadcast_cycles(from_shape: &[usize], to_shape: &[usize]) -> Option { // `fast_broadcast_params` handles this case by returning `(1, n)` (ie. // 1 cycle, n repeats) but here we want to use the equivalent n cycles, @@ -610,7 +610,7 @@ boolean_cmp_op!(Less, less); boolean_cmp_op!(LessOrEqual, less_or_equal); /// Calculate the remainder of `x / y` using floored division. See -/// [DivMode] for an explanation. +/// [`DivMode`] for an explanation. fn rem_floor< T: Copy + Default + PartialOrd + std::ops::Add + std::ops::Rem, >( @@ -663,8 +663,8 @@ pub fn mod_op< #[derive(Debug)] pub struct Mod { - /// If true, use truncated division (see [DivMode::TruncDiv], otherwise - /// use flooring division (see [DivMode::FloorDiv]). 
+    /// If true, use truncated division (see [`DivMode::TruncDiv`]), otherwise +    /// use flooring division (see [`DivMode::FloorDiv`]). pub fmod: bool, } @@ -742,7 +742,7 @@ impl Operator for Mul { } } -/// Like [f32::powf] but with fast paths for common values. +/// Like [`f32::powf`] but with fast paths for common values. fn powf(x: f32, y: f32) -> f32 { if y == 2. { x * x diff --git a/src/ops/conv.rs b/src/ops/conv.rs index 1295ff6d..ca4e084c 100644 --- a/src/ops/conv.rs +++ b/src/ops/conv.rs @@ -611,7 +611,7 @@ mod tests { /// Un-optimized reference implementation of convolution. /// - /// This has the same interface as [conv]. + /// This has the same interface as [`conv`]. fn reference_conv( input: TensorView, kernel: TensorView, diff --git a/src/ops/conv/im2col.rs b/src/ops/conv/im2col.rs index 1eb4e54b..0f0c1ad6 100644 --- a/src/ops/conv/im2col.rs +++ b/src/ops/conv/im2col.rs @@ -181,7 +181,7 @@ impl<'a, T: Copy + Default> VirtualIm2Col<'a, T> { } /// Pack part of an image according to the requirements of - /// [VirtualMatrix::pack_b]. + /// [`VirtualMatrix::pack_b`]. /// /// `NR_REGS` specifies the width of each column panel as a multiple of /// `S::LEN` elements. In other words, `panel_width` must exactly equal diff --git a/src/ops/mod.rs b/src/ops/mod.rs index 8706f114..a522aae1 100644 --- a/src/ops/mod.rs +++ b/src/ops/mod.rs @@ -163,7 +163,7 @@ impl Padding { } } -/// Construct a [Padding::Fixed] from a slice of paddings for each size. +/// Construct a [`Padding::Fixed`] from a slice of paddings for each side. impl> From for Padding { fn from(val: S) -> Padding { Padding::Fixed(val.as_ref().into()) } } @@ -724,7 +724,7 @@ pub trait Operator: Any + Debug { /// can be re-ordered without affecting the result. /// /// If true, the graph executor may swap inputs before calling the - /// [Operator::run_in_place] implementation. + /// [`Operator::run_in_place`] implementation. fn is_commutative(&self) -> bool { false } @@ -1022,7 +1022,7 @@ mod tests { expect_equal_with_tolerance(result, expected, 1e-4, 0.) } - /// Utility to simplify running a single-output [Operator] with a list of + /// Utility to simplify running a single-output [`Operator`] with a list of /// typed inputs. /// /// Usage is: diff --git a/src/ops/operators.rs b/src/ops/operators.rs index c02334b1..11ce19a1 100644 --- a/src/ops/operators.rs +++ b/src/ops/operators.rs @@ -15,7 +15,7 @@ use crate::threading::thread_pool; /// Trait which exposes ONNX operators as methods of tensors. /// /// This trait provides methods which are available on all tensor types. See -/// [FloatOperators] for additional operators which are only available on float +/// [`FloatOperators`] for additional operators which are only available on float /// tensors. pub trait Operators { type Elem; diff --git a/src/ops/random.rs b/src/ops/random.rs index 6b15212d..2452cf9c 100644 --- a/src/ops/random.rs +++ b/src/ops/random.rs @@ -46,7 +46,7 @@ pub struct RandomUniformLike { pub low: f32, pub high: f32, - /// Random seed. See [RandomUniform::seed]. + /// Random seed. See [`RandomUniform::seed`]. pub seed: Option, } @@ -112,7 +112,7 @@ pub struct RandomNormalLike { pub mean: f32, pub scale: f32, - /// Random seed. See [RandomUniform::seed]. + /// Random seed. See [`RandomUniform::seed`]. pub seed: Option, } diff --git a/src/ops/resize.rs b/src/ops/resize.rs index 2b0a252a..c4996c59 100644 --- a/src/ops/resize.rs +++ b/src/ops/resize.rs @@ -223,7 +223,7 @@ fn bilinear_resize( /// Resize an NCHW image tensor to a given `[height, width]`.
/// -/// This is a simplified API for [resize]. +/// This is a simplified API for [`resize`]. pub fn resize_image(input: TensorView, size: [usize; 2]) -> Result { let [batch, chans, _height, _width] = static_dims!(input, 4)?.shape(); let [out_height, out_width] = size; diff --git a/src/tensor_pool.rs b/src/tensor_pool.rs index 23ae8f7f..5954c328 100644 --- a/src/tensor_pool.rs +++ b/src/tensor_pool.rs @@ -4,7 +4,7 @@ use std::ops::{Deref, DerefMut}; use rten_tensor::{Alloc, CowData, MutLayout, TensorBase}; /// A memory buffer that can be used to satisfy a future allocation from -/// a [TensorPool]. +/// a [`TensorPool`]. struct Buffer { /// Pointer and capacity extracted from the `Vec`. The length is always /// zero. @@ -20,7 +20,7 @@ struct Buffer { } impl Buffer { - /// Clear `vec` using [Vec::clear] and convert it into a buffer. + /// Clear `vec` using [`Vec::clear`] and convert it into a buffer. fn from_vec(mut vec: Vec) -> Buffer { let layout = std::alloc::Layout::array::(vec.capacity()).unwrap(); @@ -87,7 +87,7 @@ impl Drop for Buffer { /// buffer from the global allocator and freeing it when no longer needed, /// can provide a significant performance improvement. /// -/// [TensorPool] implements the [Alloc] trait, enabling tensors to be allocated +/// [`TensorPool`] implements the [`Alloc`] trait, enabling tensors to be allocated /// from the pool using the various `Tensor::*_in` methods, eg. /// [`Tensor::zeros_in`](rten_tensor::Tensor::zeros_in). Allocation requests /// will be satisfied from the pool if there is a suitable buffer available, or /// /// When a tensor is no longer needed, its buffer can be added to the pool /// using `pool.add(tensor.extract_buffer())`, making it available for future -/// allocations. A more convenient method is to wrap the tensor in a [PoolRef] +/// allocations. A more convenient method is to wrap the tensor in a [`PoolRef`] /// smart pointer which will auto-return the tensor to the pool when dropped. A -/// tensor can be wrapped using `tensor.auto_return(pool)`. The [PoolRef] smart +/// tensor can be wrapped using `tensor.auto_return(pool)`. The [`PoolRef`] smart /// pointer can also be used with other container types, by implementing the -/// [ExtractBuffer] trait for them. +/// [`ExtractBuffer`] trait for them. pub struct TensorPool { /// List of buffers currently in the pool. buffers: RefCell>, @@ -164,7 +164,7 @@ impl TensorPool { /// Add a data buffer to the pool. /// - /// The buffer will be cleared using [Vec::clear] and then made available + /// The buffer will be cleared using [`Vec::clear`] and then made available /// to fulfill future allocation requests. pub fn add(&self, vec: Vec) { self.buffers.borrow_mut().push(Buffer::from_vec(vec)); } @@ -207,7 +207,7 @@ impl Default for TensorPool { /// Trait for extracting the data buffer from a tensor or other container. /// /// This is used to extract the buffer from a container that is no longer -/// needed, in order to return it to a [TensorPool]. +/// needed, in order to return it to a [`TensorPool`]. pub trait ExtractBuffer { type Elem; @@ -252,9 +252,9 @@ impl AutoReturn for EB { /// A smart pointer which wraps a tensor or other container and returns it to /// a pool when dropped.
/// -/// [PoolRef] is not currently [Sync], so if you want to wrap a container and +/// [`PoolRef`] is not currently [`Sync`], so if you want to wrap a container and /// then reference it inside a parallel block, you will need to deref the -/// [PoolRef] outside the parallel block. +/// [`PoolRef`] outside the parallel block. pub struct PoolRef<'a, T: ExtractBuffer> { pool: &'a TensorPool, container: Option, diff --git a/src/timing.rs b/src/timing.rs index 5523b98b..67e4b326 100644 --- a/src/timing.rs +++ b/src/timing.rs @@ -52,8 +52,8 @@ impl Sub for Instant { /// Trait for text data table sources. /// -/// Tables can be formatted using [Table::display] to get a wrapper that -/// implements [Display]. +/// Tables can be formatted using [`Table::display`] to get a wrapper that +/// implements [`Display`]. trait Table { /// Return the number of rows in this table. fn rows(&self) -> usize; @@ -74,7 +74,7 @@ trait Table { .unwrap_or(0) } - /// Return a wrapper around this table which implements [Display]. + /// Return a wrapper around this table which implements [`Display`]. fn display(&self, indent: usize) -> DisplayTable where Self: Sized, @@ -163,7 +163,7 @@ impl<'a> RunTiming<'a> { } impl<'a> fmt::Display for RunTiming<'a> { - /// Format timings with the default sort order (see [TimingSort]). + /// Format timings with the default sort order (see [`TimingSort`]). fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { self.display(TimingSort::ByTime, false /* include_shapes */) .fmt(f) @@ -197,7 +197,7 @@ struct TimingByShapeRecord { node_name: String, } -/// [Display]-able table containing a breakdown of operator execution time +/// [`Display`]-able table containing a breakdown of operator execution time /// by input shape. struct TimingByShapeTable { rows: Vec,
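Finally, a sketch of the pooled-allocation workflow that the tensor_pool.rs docs above describe. It assumes `TensorPool` and the `AutoReturn` trait are re-exported from the rten crate root and that a `TensorPool::new` constructor exists:

```rust
use rten::{AutoReturn, TensorPool};
use rten_tensor::prelude::*;
use rten_tensor::NdTensor;

fn main() {
    let pool = TensorPool::new();
    {
        // Allocate from the pool, then wrap in a `PoolRef` so the buffer
        // is handed back automatically when it goes out of scope.
        let t = NdTensor::<f32, 2>::zeros_in(&pool, [64, 64]).auto_return(&pool);
        let _sum: f32 = t.iter().sum();
    }
    // A later allocation of the same size can reuse the returned buffer.
    let _t2 = NdTensor::<f32, 2>::zeros_in(&pool, [64, 64]);
}
```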