From 40f29506e92f281f7b23cb74e1e4bcd04934b554 Mon Sep 17 00:00:00 2001 From: Vindaar Date: Thu, 2 Apr 2020 11:28:44 +0200 Subject: [PATCH 01/20] make docs more compliant with rst --- docs/index.rst | 2 +- .../primitives/matrix_multiplication/gemm_tiling.nim | 2 +- .../matrix_multiplication/gemm_ukernel_sse2.nim | 8 ++++---- src/linear_algebra/helpers/solve_lapack.nim | 6 +++--- src/tensor/private/p_kernels_interface_opencl.nim | 8 ++++---- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index b0ac76120..9354d1e44 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -144,7 +144,7 @@ Installation: Nim is available in some Linux repositories and on Homebrew for macOS. I however recommend installing Nim in your user profile via -```choosenim`` `__. Once choosenim +`choosenim `_. Once choosenim installed Nim, you can ``nimble install arraymancer`` which will pull arraymancer and all its dependencies. diff --git a/src/laser/primitives/matrix_multiplication/gemm_tiling.nim b/src/laser/primitives/matrix_multiplication/gemm_tiling.nim index 0ccf3ecf4..a29cd3177 100644 --- a/src/laser/primitives/matrix_multiplication/gemm_tiling.nim +++ b/src/laser/primitives/matrix_multiplication/gemm_tiling.nim @@ -218,7 +218,7 @@ func x86_ukernel*(cpu: CPUFeatureX86, T: typedesc, c_unit_stride: bool): MicroKe result.nb_vecs_nr = NbVecs[cpu] # SIMD vectors of B result.nr = result.nb_vecs_nr * result.nb_scalars -############################################# +# ############################################# # Workaround "undeclared identifier mr or nr" # for some reason the compiler cannot access fields in # the static MicroKernel. diff --git a/src/laser/primitives/matrix_multiplication/gemm_ukernel_sse2.nim b/src/laser/primitives/matrix_multiplication/gemm_ukernel_sse2.nim index c6f844d5a..2ec32e034 100644 --- a/src/laser/primitives/matrix_multiplication/gemm_ukernel_sse2.nim +++ b/src/laser/primitives/matrix_multiplication/gemm_ukernel_sse2.nim @@ -25,11 +25,11 @@ ukernel_generator( simd_fma = float64x2_muladd_unfused ) -####################################### +# ####################################### # # Int32: hack to unroll scalar code # -####################################### +# ####################################### # This is faster than using the fallback for mm_mullo_epi32 # in laser/primitives/private/sse2_utils @@ -80,11 +80,11 @@ ukernel_generator( ) -####################################### +# ####################################### # # Int64: hack to unroll scalar code # -####################################### +# ####################################### type Int64x2 = array[2, int64] diff --git a/src/linear_algebra/helpers/solve_lapack.nim b/src/linear_algebra/helpers/solve_lapack.nim index 68ab1198c..3d38566c5 100644 --- a/src/linear_algebra/helpers/solve_lapack.nim +++ b/src/linear_algebra/helpers/solve_lapack.nim @@ -7,15 +7,15 @@ import ./overload, ../../tensor/tensor -# Wrappers for Fortran LAPACK linear equation driver routines *SV -# Currently only *GESV is wrapped +# Wrappers for Fortran LAPACK linear equation driver routines `*SV` +# Currently only `*GESV` is wrapped # TODO: Implement GBSV, GTSV, POSV, PBSV, PTSV, SYSV overload(gesv, sgesv) overload(gesv, dgesv) proc gesv*[T: SomeFloat](a, b: var Tensor[T], pivot_indices: var seq[int32]) = - ## Wrapper for LAPACK *gesv routines + ## Wrapper for LAPACK `*gesv` routines ## Solve AX = B for general matrix ## ## In-place version, this will overwrite a and b diff --git 
a/src/tensor/private/p_kernels_interface_opencl.nim b/src/tensor/private/p_kernels_interface_opencl.nim index 736fef7a4..4b9f7e5d7 100644 --- a/src/tensor/private/p_kernels_interface_opencl.nim +++ b/src/tensor/private/p_kernels_interface_opencl.nim @@ -126,11 +126,11 @@ template genClInfixOp*( T: typedesc, export procName template gen_cl_apply2*(kern_name, ctype, op: string): string = - ## Generates an OpenCL kernel for an elementwise in-place binary infix operation (like +=, -=, *.= or /.=) + ## Generates an OpenCL kernel for an elementwise in-place binary infix operation (like `+=, -=, *.= or /.=`) ## Input: ## - The C type ## - The C kernel name (this only helps debugging the C code) - ## - The C operation (+=, -=, *.= or /.=) + ## - The C operation (`+=, -=, *.= or /.=`) opencl_getIndexOfElementID() & """ __kernel @@ -165,13 +165,13 @@ template genClInPlaceOp*( T: typedesc, cInfixOp: string, exported: static[bool] = true): untyped = ## Generates an OpenCL kernel for an elementwise in-place binary - ## infix operation (like +=, -=, *.= or /.=) + ## infix operation (like `+=, -=, *.= or /.=`) ## Input: ## - The Nim type of the elements of the input tensors ## - The equivalent C type ## - The Nim identifier of the resulting proc ## - The C kernel name (this only helps debugging the C code) - ## - The C operation (+=, -=, *.= or /.=) + ## - The C operation (`+=, -=, *.= or /.=`) proc procName(dst: var ClTensor[T], src: ClTensor[T]) = when compileOption("boundChecks"): From 56c367d034afed66119a9238f360cfbcbfd10588 Mon Sep 17 00:00:00 2001 From: Vindaar Date: Thu, 2 Apr 2020 18:17:25 +0200 Subject: [PATCH 02/20] fix documentation in header of einsum --- src/tensor/einsum.nim | 311 +++++++++++++++++++++--------------------- 1 file changed, 155 insertions(+), 156 deletions(-) diff --git a/src/tensor/einsum.nim b/src/tensor/einsum.nim index b414a407a..ad7cf6a37 100644 --- a/src/tensor/einsum.nim +++ b/src/tensor/einsum.nim @@ -4,163 +4,162 @@ import ./shapeshifting # Note: importing shapeshifting_cuda will trigger a Nim inference bug # in genContiguous with no workaround -#[ -This module provides Einstein summation for an arbitrary number of tensors. +## This module provides Einstein summation for an arbitrary number of tensors. +## +## Einstein summation describes a special application of +## `index notation `_ +## in which indices that appear more than once are implicitly summed over. +## This allows for a concise notation of many vector / matrix / tensor calculations, +## while exactly representing the required calculation. +## +## In general Einstein summation is a subset of +## `Ricci calculus `_. +## +## The implementation of `einsum` in different languages however, typically goes +## above and beyond actual Einstein summation, allowing for many aspects of +## Ricci calculus. +## +## Simple Einstein summation examples +## ================================== +## +## Typical examples include matrix-vector multiplcation, matrix-matrix multiplication +## or the cross product. The examples below use the `einsum` / notation for the +## elements of tensors, namely `m[i,j]` for element `i,j` of the matrix ``m``, instead of +## the more mathematical notation `m_ij`. +## +## Matrix-vector multiplication +## ---------------------------- +## +## Let ``m`` be an `NxM` matrix and ``v`` a `M` vector. Then matrix-vector multiplication +## `m * v` is defined as: +## `w[i] = \sum_j m[i,j] * v[j]`. +## The result is an `N` vector ``w`` consisting of elements `w[i]`. 
+## Since `j` appears twice on the RHS of the equation, Einstein summation implies that +## the sum over `j` is implicit, hence we can write: +## +## `w[i] = m[i,j] * v[j]`. +## +## Matrix-matrix multiplication +## ---------------------------- +## +## The same can be applied to matrix-matrix multiplication. Let ``m``, ``n`` be two +## compatible matrices (both `NxN` or `NxM` and `MxN`) with elements `m[i,j]` and +## `n[i,j]`. Matrix-matrix multiplication is defined as +## +## `a[i,k] = \sum_j m[i,j] * n[j,k]` +## +## and thus in Einstein summation: +## +## `a[i,k] = m[i,j] * n[j,k]`. +## +## Cross-product of two vectors +## ---------------------------- +## +## The cross product of two 3 vectors ``v``, ``w`` can be conveniently defined using +## the `Levi-Civita symbol `_ +## `\epsilon_{ijk}`: +## +## `a[i] = \epsilon_{ijk} v[j] * w[k]`, +## +## which implies `j` and `k` are summed over, while `i` is kept for the resulting tensor. +## +## More complex examples +## ===================== +## +## In this implementation of `einsum` (similar to other `einsum` implementations), +## it's also possible to explicitly keep different dimensions of the multiplied +## tensors or even perform calculations without a single index appearing mutliple +## times, for instance to transpose a tensor. For these cases the explicit form +## of the `einsum` macro has to be used, see below. +## +## Transposition of a matrix +## ------------------------- +## +## Transposition of a matrix can be expressed in index notation simply as an +## exchange of indices, namely let ``m`` be an `NxM` matrix, the transposed +## `MxN` matrix ``m^T`` is written as: +## +## `m[j,i] = m[i,j]`. +## +## Hadamard product +## ---------------- +## +## The Hadamard product defines the product of two `NxM` matrices ``n``, ``m`` +## in which the matrices are multiplied element wise. It is a good example +## of the extension of `einsum` over standard Einstein summation: +## +## `a[i,j] = m[i,j] * n[i,j]`. +## +## Naive Einstein summation would demand a sum over both `i` and `j`, resulting +## in a scalar on the LHS instead of another `NxM` matrix. +## +## Contracting a whole matrix +## -------------------------- +## +## Contraction of a full matrix describes summing all elements of a matrix +## ``m``, resulting in a scalar `a`. It is expressed by: +## +## `a = m[i,i]`. +## +## The `einsum` macro +## ================== +## +## The `einsum` macro provides two different usage paradigms. +## * implicit <- normal Einstein summation +## * explicit <- potential extended Einstein summation +## +## The macro takes a `varargs[Tensor]` and a single statement. It +## returns a `Tensor[T]`, where `T` is deduced from the subtype of the +## given tensors, if the result is not a scalar. For a scalar result +## the return value is of type `T`. Note that the type of all given tensors +## must match! +## +## The statement given to the macro is just a single line making use of +## Einstein summation as in all the examples above. As a matter of fact +## all examples above are valid statements for the `einsum` macro! +## +## Of course only tensors, which are given to the macro in the `varargs` +## may be used in the statement. +## +## If only the `RHS` of the examples above are given, the required indices +## for the resulting tensor are automatically calculated using pure Einstein +## summation. Assuming `a`, `b` are two 2D arraymancer tensors , we could +## express their matrix mutliplcation as +## +## .. 
code:: nim +## let c = einsum(a, b): +## a[i,j] * b[j,k] +## +## Of course the same can be written in explicit form: +## +## .. code:: nim +## let c = einsum(a, b): +## c[i,k] = a[i,j] * b[j,k] +## +## A few things must be noted here for the explicit case: +## * the indices on the LHS are taken as "the truth"! Any index appearing here +## will ``not`` be summed over. +## * the order on the LHS is taken into account, allowing for transposing +## dimensions. +## * the identifier used on the LHS is arbitrary. It can match what the user assigns +## to, but need not. +## +## For many more examples for typical applications, take a look at the test case +## `<../../tests/tensor/test_einsum.nim>`_. +## +## Implementation details +## ---------------------- +## +## The macro calculates, which indices must be contracted and which remain in the +## final tensor. For each appearing index (of either case) we create a for loop, +## while the contracting for loops appear within the non contracting indices. +## +## The macro creates a `block`, in which the code is produced and returns the +## temporary tensor used in it. +## +## It also forces the tensors into contiguous, row major form by creating +## local copies with `asContiguous`. -Einstein summation describes a special application of -`index notation `_ -in which indices that appear more than once are implicitly summed over. -This allows for a concise notation of many vector / matrix / tensor calculations, -while exactly representing the required calculation. - -In general Einstein summation is a subset of -`Ricci calculus `_. - -The implementation of `einsum` in different languages however, typically goes -above and beyond actual Einstein summation, allowing for many aspects of -Ricci calculus. - -Simple Einstein summation examples -================================== - -Typical examples include matrix-vector multiplcation, matrix-matrix multiplication -or the cross product. The examples below use the `einsum` / notation for the -elements of tensors, namely `m[i,j]` for element `i,j` of the matrix ``m``, instead of -the more mathematical notation `m_ij`. - -Matrix-vector multiplication ----------------------------- - -Let ``m`` be an `NxM` matrix and ``v`` a `M` vector. Then matrix-vector multiplication -`m * v` is defined as: -`w[i] = \sum_j m[i,j] * v[j]`. -The result is an `N` vector ``w`` consisting of elements `w[i]`. -Since `j` appears twice on the RHS of the equation, Einstein summation implies that -the sum over `j` is implicit, hence we can write: - -`w[i] = m[i,j] * v[j]`. - -Matrix-matrix multiplication ----------------------------- - -The same can be applied to matrix-matrix multiplication. Let ``m``, ``n`` be two -compatible matrices (both `NxN` or `NxM` and `MxN`) with elements `m[i,j]` and -`n[i,j]`. Matrix-matrix multiplication is defined as - -`a[i,k] = \sum_j m[i,j] * n[j,k]` - -and thus in Einstein summation: - -`a[i,k] = m[i,j] * n[j,k]`. - -Cross-product of two vectors ----------------------------- - -The cross product of two 3 vectors ``v``, ``w`` can be conveniently defined using -the `Levi-Civita symbol `_ -`\epsilon_{ijk}`: - -`a[i] = \epsilon_{ijk} v[j] * w[k]`, - -which implies `j` and `k` are summed over, while `i` is kept for the resulting tensor. 
- -More complex examples -===================== - -In this implementation of `einsum` (similar to other `einsum` implementations), -it's also possible to explicitly keep different dimensions of the multiplied -tensors or even perform calculations without a single index appearing mutliple -times, for instance to transpose a tensor. For these cases the explicit form -of the `einsum` macro has to be used, see below. - -Transposition of a matrix -------------------------- - -Transposition of a matrix can be expressed in index notation simply as an -exchange of indices, namely let ``m`` be an `NxM` matrix, the transposed -`MxN` matrix ``m^T`` is written as: - -`m[j,i] = m[i,j]`. - -Hadamard product ----------------- - -The Hadamard product defines the product of two `NxM` matrices ``n``, ``m`` -in which the matrices are multiplied element wise. It is a good example -of the extension of `einsum` over standard Einstein summation: - -`a[i,j] = m[i,j] * n[i,j]`. - -Naive Einstein summation would demand a sum over both `i` and `j`, resulting -in a scalar on the LHS instead of another `NxM` matrix. - -Contracting a whole matrix --------------------------- - -Contraction of a full matrix describes summing all elements of a matrix -``m``, resulting in a scalar `a`. It is expressed by: - -`a = m[i,i]`. - -The `einsum` macro -================== - -The `einsum` macro provides two different usage paradigms. -* implicit <- normal Einstein summation -* explicit <- potential extended Einstein summation - -The macro takes a `varargs[Tensor]` and a single statement. It -returns a `Tensor[T]`, where `T` is deduced from the subtype of the -given tensors, if the result is not a scalar. For a scalar result -the return value is of type `T`. Note that the type of all given tensors -must match! - -The statement given to the macro is just a single line making use of -Einstein summation as in all the examples above. As a matter of fact -all examples above are valid statements for the `einsum` macro! - -Of course only tensors, which are given to the macro in the `varargs` -may be used in the statement. - -If only the `RHS` of the examples above are given, the required indices -for the resulting tensor are automatically calculated using pure Einstein -summation. Assuming `a`, `b` are two 2D arraymancer tensors , we could -express their matrix mutliplcation as - -.. code:: nim - let c = einsum(a, b): - a[i,j] * b[j,k] - -Of course the same can be written in explicit form: - -.. code:: nim - let c = einsum(a, b): - c[i,k] = a[i,j] * b[j,k] - -A few things must be noted here for the explicit case: -* the indices on the LHS are taken as "the truth"! Any index appearing here - will ``not`` be summed over. -* the order on the LHS is taken into account, allowing for transposing - dimensions. -* the identifier used on the LHS is arbitrary. It can match what the user assigns - to, but need not. - -For many more examples for typical applications, take a look at the test case -`<../../tests/tensor/test_einsum.nim>`_. - -Implementation details ----------------------- - -The macro calculates, which indices must be contracted and which remain in the -final tensor. For each appearing index (of either case) we create a for loop, -while the contracting for loops appear within the non contracting indices. - -The macro creates a `block`, in which the code is produced and returns the -temporary tensor used in it. - -It also forces the tensors into contiguous, row major form by creating -local copies with `asContiguous`. 
-]#
 
 type
   # enum which stores whether an `einsum` call is explicit `skAssign` (statement

From a21a957c98598fc3b8418be2736a5707c8900cba Mon Sep 17 00:00:00 2001
From: Vindaar
Date: Thu, 2 Apr 2020 18:17:25 +0200
Subject: [PATCH 03/20] fix openmp import path in `blas_l3_gemm_macro_kernel.nim`

---
 src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim b/src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim
index eff49f9f4..1bb805eb0 100644
--- a/src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim
+++ b/src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import ../backend/openmp
+import ../../backend/openmp
 
 proc gemm_macro_kernel[T](mc, nc, kc: int,
                           alpha: T,

From 562a6e8583bad8468ffa721e9c7734c94c778ce5 Mon Sep 17 00:00:00 2001
From: Vindaar
Date: Thu, 2 Apr 2020 18:18:50 +0200
Subject: [PATCH 04/20] fix type import in nnpack_interface.nim

---
 src/nn_primitives/backend/nnpack_interface.nim | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/nn_primitives/backend/nnpack_interface.nim b/src/nn_primitives/backend/nnpack_interface.nim
index 1fd8fdf08..2defc6532 100644
--- a/src/nn_primitives/backend/nnpack_interface.nim
+++ b/src/nn_primitives/backend/nnpack_interface.nim
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import ../../tensor/tensor, ../types
+import ../../tensor/tensor, ../private/p_nnp_types
 import ./nnpack
 proc nnpack_conv2d*(input, weight, bias: Tensor[float32], padding, stride: Size2D): Tensor[float32] {.noInit.}=
   # TODO use a single convention, return value or var result

From 808ca559858276719b73fe57a8272c4bdb9257a4 Mon Sep 17 00:00:00 2001
From: Vindaar
Date: Thu, 2 Apr 2020 18:19:30 +0200
Subject: [PATCH 05/20] make all `noInit` pragmas consistent

Well, nim still complains. All `noInit` pragmas should be the other way round I fear. :|

---
 .../matrix_multiplication/gemm_ukernel_generator.nim | 4 ++--
 src/nn_primitives/backend/cudnn.nim | 2 +-
 src/nn_primitives/nnp_maxpooling.nim | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/laser/primitives/matrix_multiplication/gemm_ukernel_generator.nim b/src/laser/primitives/matrix_multiplication/gemm_ukernel_generator.nim
index f6dc50058..a99afe5a0 100644
--- a/src/laser/primitives/matrix_multiplication/gemm_ukernel_generator.nim
+++ b/src/laser/primitives/matrix_multiplication/gemm_ukernel_generator.nim
@@ -188,10 +188,10 @@ macro ukernel_simd_impl*(
   var declBody = newStmtList()
   for a in rA:
     declBody.add quote do:
-      var `a`{.noinit.}: `V`
+      var `a`{.noInit.}: `V`
   for b in rB:
     declBody.add quote do:
-      var `b`{.noinit.}: `V`
+      var `b`{.noInit.}: `V`
   for i in 0 ..< MR:
     for j in 0 ..< NbVecs:
       let ab = rAB[i][j]
diff --git a/src/nn_primitives/backend/cudnn.nim b/src/nn_primitives/backend/cudnn.nim
index 211f8f49b..a57aa7a10 100644
--- a/src/nn_primitives/backend/cudnn.nim
+++ b/src/nn_primitives/backend/cudnn.nim
@@ -56,7 +56,7 @@ template asCudnnType*[T: SomeFloat](typ: typedesc[T]): cudnnDataType_t =
 # #####################################################################
 # Tensor descriptor
 
-proc newCudnn4DTensorDesc*[T: SomeFloat](t: CudaTensor[T]): cudnnTensorDescriptor_t {.inline, noinit.}=
+proc newCudnn4DTensorDesc*[T: SomeFloat](t: CudaTensor[T]): cudnnTensorDescriptor_t {.inline, noInit.}=
   # TODO: destroy descriptor automatically
   # TODO: generalize with the NDTensor Desc
   check cudnnCreateTensorDescriptor(result.addr)
diff --git a/src/nn_primitives/nnp_maxpooling.nim b/src/nn_primitives/nnp_maxpooling.nim
index a6ff1e5d6..654d42a4c 100644
--- a/src/nn_primitives/nnp_maxpooling.nim
+++ b/src/nn_primitives/nnp_maxpooling.nim
@@ -21,7 +21,7 @@ proc maxpool2d*[T](input: Tensor[T],
                    kernel: Size2D,
                    padding: Size2D = (0,0),
                    stride: Size2D = (1,1)
-                  ): tuple[max_indices: Tensor[int], maxpooled: Tensor[T]] {.noinit.}=
+                  ): tuple[max_indices: Tensor[int], maxpooled: Tensor[T]] {.noInit.}=
   ## MaxPool 2D forward pass
 
   assert input.rank == 4 and input.is_C_contiguous

From c0a647ea83bd8436868b580fcb0bccaaa579210a Mon Sep 17 00:00:00 2001
From: Vindaar
Date: Thu, 2 Apr 2020 18:20:04 +0200
Subject: [PATCH 06/20] add missing import for `auxiliary_lapack` "main module"

---
 src/linear_algebra/helpers/auxiliary_lapack.nim | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/linear_algebra/helpers/auxiliary_lapack.nim b/src/linear_algebra/helpers/auxiliary_lapack.nim
index 8ad74c913..e198105bd 100644
--- a/src/linear_algebra/helpers/auxiliary_lapack.nim
+++ b/src/linear_algebra/helpers/auxiliary_lapack.nim
@@ -167,6 +167,7 @@ proc ormqr*[T: SomeFloat](C: var Tensor[T], Q: Tensor[T], tau: openarray[T], sid
 when isMainModule:
   import ./decomposition_lapack
   import ../../ml/metrics/common_error_functions
+  import ../../private/sequninit
 
   let a = [[12.0, -51.0, 4.0],
            [ 6.0, 167.0, -68.0],

From 4340d21186e49e8716a70d06addabcbf768228cf Mon Sep 17 00:00:00 2001
From: Vindaar
Date: Thu, 2 Apr 2020 18:20:25 +0200
Subject: [PATCH 07/20] [laser] disallow `gemm_packed.nim` main proc, not compatible

I assume the `when isMainModule` parts are straight from laser. However, the
tensor definitions are missing / in different files in arraymancer and thus
the code is broken.
--- .../primitives/matrix_multiplication/gemm_prepacked.nim | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/laser/primitives/matrix_multiplication/gemm_prepacked.nim b/src/laser/primitives/matrix_multiplication/gemm_prepacked.nim index 1f3a90584..9e8338d4c 100644 --- a/src/laser/primitives/matrix_multiplication/gemm_prepacked.nim +++ b/src/laser/primitives/matrix_multiplication/gemm_prepacked.nim @@ -298,8 +298,9 @@ proc gemm_packed*[T: SomeNumber]( # # ############################################################ -when isMainModule: - +when false: + ## these tests don't work in arraymancer, since the imported files are not + ## part of arraymancer's repository. import ../../tensor/[allocator, datatypes, initialization], strformat From 8ba800c46b6f89d257e3c90781729fa6a5056c26 Mon Sep 17 00:00:00 2001 From: Vindaar Date: Thu, 2 Apr 2020 18:32:36 +0200 Subject: [PATCH 08/20] add code to generate nimdoc.cfg and documentation --- arraymancer.nimble | 89 ++--------- docs/docs.nim | 154 +++++++++++++++++++ docs/generateNimdocCfg.nim | 307 +++++++++++++++++++++++++++++++++++++ 3 files changed, 471 insertions(+), 79 deletions(-) create mode 100644 docs/docs.nim create mode 100644 docs/generateNimdocCfg.nim diff --git a/arraymancer.nimble b/arraymancer.nimble index b8a3178b4..ed4dd6041 100644 --- a/arraymancer.nimble +++ b/arraymancer.nimble @@ -229,88 +229,19 @@ task test_mkl_omp, "Run all tests - Intel MKL + OpenMP": task test_release, "Run all tests - Release mode": test "tests_cpu", " -d:release" -task gen_doc, "Generate Arraymancer documentation": - # TODO: Industrialize: something more robust that only check nim files (and not .DS_Store ...) - for filePath in listFiles("src/tensor/"): - let modName = filePath[11..^5] # Removing src/tensor/ (11 chars) and .nim (4 chars) # TODO: something more robust - # Cuda doc is broken https://github.com/nim-lang/Nim/issues/6910 - # Delete doc comment from nimcuda before using this - exec r"nim doc -o:docs/build/tensor." & modName & ".html " & filePath - - for filePath in listFiles("src/nn_primitives/"): - let modName = filePath[18..^5] # Removing src/nn_primitives/ (18 chars) and .nim (4 chars) # TODO: something more robust - # Cuda doc is broken https://github.com/nim-lang/Nim/issues/6910 - # Delete doc comment from nimcuda before using this - exec r"nim doc -o:docs/build/nnp." & modName & ".html " & filePath - - for filePath in listFiles("src/autograd/"): - let modName = filePath[13..^5] # Removing src/autograd/ (13 chars) and .nim (4 chars) # TODO: something more robust - exec r"nim doc -o:docs/build/ag." & modName & ".html " & filePath - - for filePath in listFiles("src/nn/"): - let modName = filePath[7..^5] # Removing src/nn_primitives/ (18 chars) and .nim (4 chars) # TODO: something more robust - exec r"nim doc -o:docs/build/nn." & modName & ".html " & filePath - - # TODO auto check subdir - for filePath in listFiles("src/nn/activation/"): - let modName = filePath[18..^5] - exec r"nim doc -o:docs/build/nn_activation." & modName & ".html " & filePath - - for filePath in listFiles("src/nn/layers/"): - let modName = filePath[14..^5] - exec r"nim doc -o:docs/build/nn_layers." & modName & ".html " & filePath - - for filePath in listFiles("src/nn/loss/"): - let modName = filePath[12..^5] - exec r"nim doc -o:docs/build/nn_loss." & modName & ".html " & filePath - - for filePath in listFiles("src/nn/optimizers/"): - let modName = filePath[18..^5] - exec r"nim doc -o:docs/build/nn_optimizers." 
& modName & ".html " & filePath - - for filePath in listFiles("src/nn_dsl/"): - let modName = filePath[11..^5] - exec r"nim doc -o:docs/build/nn_dsl." & modName & ".html " & filePath - - for filePath in listFiles("src/linear_algebra/"): - let modName = filePath[19..^5] - exec r"nim doc -o:docs/build/la." & modName & ".html " & filePath - - for filePath in listFiles("src/stats/"): - let modName = filePath[10..^5] - exec r"nim doc -o:docs/build/stats." & modName & ".html " & filePath - - for filePath in listFiles("src/ml/clustering/"): - let modName = filePath[18..^5] - exec r"nim doc -o:docs/build/ml." & modName & ".html " & filePath - - for filePath in listFiles("src/ml/dimensionality_reduction/"): - let modName = filePath[32..^5] - exec r"nim doc -o:docs/build/ml." & modName & ".html " & filePath - - for filePath in listFiles("src/ml/metrics/"): - let modName = filePath[15..^5] - exec r"nim doc -o:docs/build/ml." & modName & ".html " & filePath - - block: - let filePath = "src/nlp/tokenizers.nim" - let modName = filePath[8..^5] - exec r"nim doc -o:docs/build/nlp." & modName & ".html " & filePath - - for filePath in listFiles("src/io/"): - let modName = filePath[7..^5] - exec r"nim doc -o:docs/build/io." & modName & ".html " & filePath - - for filePath in listFiles("src/datasets/"): - let modName = filePath[13..^5] - exec r"nim doc -o:docs/build/datasets." & modName & ".html " & filePath +import docs / [docs, generateNimdocCfg] +task gen_docs, "Generate Arraymancer documentation": + # generate nimdoc.cfg file so we can generate the correct header for the + # index.html page without having to mess with the HTML manually. + genNimdocCfg("src/") + # build the actual docs and the index + buildDocs("src/", "docs/build") + # Copy our stylesheets + cpFile("docs/docutils.css", "docs/build/docutils.css") + cpFile("docs/nav.css", "docs/build/nav.css") # Process the rst for filePath in listFiles("docs/"): if filePath[^4..^1] == ".rst": let modName = filePath[5..^5] exec r"nim rst2html -o:docs/build/" & modName & ".html " & filePath - - # Copy stylesheets - cpFile("docs/docutils.css", "docs/build/docutils.css") - cpFile("docs/nav.css", "docs/build/nav.css") diff --git a/docs/docs.nim b/docs/docs.nim new file mode 100644 index 000000000..e36e2d86e --- /dev/null +++ b/docs/docs.nim @@ -0,0 +1,154 @@ +import macros, strformat, strutils, sequtils, sets + +from os import parentDir, getCurrentCompilerExe, DirSep, extractFilename, `/` + +when defined(nimdoc): + from os import getCurrentDir, paramCount, paramStr + +#[ +This file is a slightly modified version of the same file of `nimterop`: +https://github.com/nimterop/nimterop/blob/master/nimterop/docs.nim +]# + + +proc getNimRootDir(): string = + #[ + hack, but works + alternatively (but more complex), use (from a nim file, not nims otherwise + you get Error: ambiguous call; both system.fileExists): + import "$nim/testament/lib/stdtest/specialpaths.nim" + nimRootDir + ]# + fmt"{currentSourcePath}".parentDir.parentDir.parentDir + +const + DirSep = when defined(windows): '\\' else: '/' + +proc execAction(cmd: string): string = + var + ccmd = "" + ret = 0 + when defined(Windows): + ccmd = "cmd /c " & cmd + elif defined(posix): + ccmd = cmd + else: + doAssert false + + (result, ret) = gorgeEx(ccmd) + doAssert ret == 0, "Command failed: " & $ret & "\ncmd: " & ccmd & "\nresult:\n" & result + +template genRemove(name: untyped): untyped = + proc `name`(s, toRemove: string): string = + result = s + result.`name`(toRemove) +genRemove(removePrefix) 
+genRemove(removeSuffix) + +proc getFiles*(path: string): seq[string] = + # Add files and dirs here, which should be skipped. + #const excludeDirs = [] + #let ExcludeDirSet = toSet(excludeDirs) + #if path.extractFilename in ExcludeDirSet: return + # The files below are not valid by themselves, they are only included + # from other files + const excludeFiles = [ "blas_l3_gemm_aux.nim", + "blas_l3_gemm_data_structure.nim", + "blas_l3_gemm_macro_kernel.nim", + "blas_l3_gemm_micro_kernel.nim", + "blas_l3_gemm_packing.nim", + "p_checks_cuda.nim", + "p_checks_opencl.nim", + "blis_api.nim" ] + let ExcludeFileSet = toSet(excludeFiles) + + for file in listFiles(path): + if file.endsWith(".nim") and file.extractFilename notin ExcludeFileSet: + result.add file + for dir in listDirs(path): + result.add getFiles(dir) + +proc buildDocs*(path: string, docPath: string, baseDir = getProjectPath() & $DirSep, + defines: openArray[string] = @[]) = + ## Generate docs for all nim files in `path` and output all HTML files to the + ## `docPath` in a flattened form (subdirectories are removed). + ## + ## If duplicate filenames are detected, they will be printed at the end. + ## + ## `baseDir` is the project path by default and `files` and `path` are relative + ## to that directory. Set to "" if using absolute paths. + ## + ## `defines` is a list of `-d:xxx` define flags (the `xxx` part) that should be passed + ## to `nim doc` so that `getHeader()` is invoked correctly. + ## + ## Use the `--publish` flag with nimble to publish docs contained in + ## `path` to Github in the `gh-pages` branch. This requires the ghp-import + ## package for Python: `pip install ghp-import` + ## + ## WARNING: `--publish` will destroy any existing content in this branch. + ## + ## NOTE: `buildDocs()` only works correctly on Windows with Nim 1.0+ since + ## https://github.com/nim-lang/Nim/pull/11814 is required. + when defined(windows) and (NimMajor, NimMinor, NimPatch) < (1, 0, 0): + echo "buildDocs() unsupported on Windows for Nim < 1.0 - requires PR #11814" + else: + let + baseDir = + if baseDir == $DirSep: + getCurrentDir() & $DirSep + else: + baseDir + docPath = baseDir & docPath + path = baseDir & path + defStr = block: + var defStr = "" + for def in defines: + defStr &= " -d:" & def + defStr + nim = getCurrentCompilerExe() + + # now we walk the whole `path` and build the documentation for each `.nim` file. + # While doing that we flatten the directory structure for the generated HTML files. + # `src/foo/bar/baz.nim` just becomes + # `docPath/baz.html`. + # This allows for all files to be in the `docPath` directory, which means each + # file will be able to find the `dochack.js` file, which will be put into + # the `docPath` directory, too (the inclusion of the `dochack.js` is done statically + # via our generated nimdoc.cfg file and is fixed for each generated HTML). 
+ let files = getFiles(path) + var idx = 0 + var fileSet = initHashSet[string]() + var duplSet = initHashSet[string]() + for file in files: + let baseName = file.extractFilename() + let relPath = file.removePrefix(path).removeSuffix(baseName) + let prefix = relPath.strip(chars = {'/'}) # remove possible trailing `/` + .split('/') # split path parts + .join(".") # concat by `.` instead + var outfile = baseName.replace(".nim", ".html") + if outfile in fileSet: + duplSet.incl outfile + else: + fileSet.incl outfile + outfile = docPath / outfile + echo "Processing: ", outfile, " [", idx, "/", files.len, "]" + # now call `nim doc` on each file + echo execAction(&"{nim} doc {defStr} -o:{outfile} --index:on {file}") + inc idx + ## now build the index + echo execAction(&"{nim} buildIndex -o:{docPath}/theindex.html {docPath}") + when declared(getNimRootDir): + #[ + this enables doc search, works at least locally with: + cd {docPath} && python -m SimpleHTTPServer 9009 + ]# + echo execAction(&"{nim} js -o:{docPath}/dochack.js {getNimRootDir()}/tools/dochack/dochack.nim") + + for i in 0 .. paramCount(): + if paramStr(i) == "--publish": + echo execAction(&"cd {docPath} && ghp-import --no-jekyll -fp {docPath}") + break + + # echo "Processed files: ", fileSet + if duplSet.card > 0: + echo "WARNING: Duplicate filenames detected: ", duplSet diff --git a/docs/generateNimdocCfg.nim b/docs/generateNimdocCfg.nim new file mode 100644 index 000000000..8a46d94ba --- /dev/null +++ b/docs/generateNimdocCfg.nim @@ -0,0 +1,307 @@ +import strutils, docs, tables, sequtils, macros, strformat, algorithm +from os import parentDir, getCurrentCompilerExe, DirSep, extractFilename, `/` + +## Arraymancer documentation generation + + +const gitUrl* = "https://github.com/mratsim/arraymancer" + +const docItemSeeSrc* = """  Source +Edit +""" + +# TODO: industrialize similar to Nim website: https://github.com/nim-lang/Nim/blob/e758b9408e8fe935117f7f793164f1c9b74cec06/tools/nimweb.nim#L45 +# And: https://github.com/nim-lang/Nim/blob/d3f966922ef4ddd05c137f82e5b2329b3d5dc485/web/website.ini#L31 + +# TODO: move the technical reference to the end (need some CSS so that elements are properly placed) + +const docFileTmpl* = """ + + + + + + + + + + + + + + + + + +$title + + + + + + + + + +Arraymancer - $title + + + + + + + +Fork me on GitHub + + +
+
+

$title

+ $content +
+ +
+
+p
+$analytics +""" + +const headerTmpl* = """ +
+ Arraymancer
+
+ Technical reference
+
+
+
+ Tutorial
+
+
+
+ Spellbook (How-To's)
+
+
+
+ Under the hood
+
+
+
+ + +""" + +let nameMap = { + "dsl_core.html" : "Neural network: Declaration", + "relu.html" : "Activation: Relu (Rectified linear Unit)", + "sigmoid.html" : "Activation: Sigmoid", + "tanh.html" : "Activation: Tanh", + "conv2D.html" : "Layers: Convolution 2D", + "embedding.html" : "Layers: Embedding", + "gru.html" : "Layers: GRU (Gated Linear Unit)", + "linear.html" : "Layers: Linear/Dense", + "maxpool2D.html" : "Layers: Maxpool 2D", + "cross_entropy_losses.html" : "Loss: Cross-Entropy losses", + "mean_square_error_loss.html" : "Loss: Mean Square Error", + + "reshape_flatten.html" : "Reshape & Flatten", + + "decomposition.html" : "Eigenvalue decomposition", + "decomposition_rand.html" : "Randomized Truncated SVD", + "least_squares.html" : "Least squares solver", + "linear_systems.html" : "Linear systems solver", + "special_matrices.html" : "Special linear algebra matrices", + "stats.html" : "Statistics", + "pca.html" : "Principal Component Analysis (PCA)", + "accuracy_score.html" : "Accuracy score", + "common_error_functions.html" : "Common errors, MAE and MSE (L1, L2 loss)", + "kmeans.html" : "K-Means", + + "mnist.html" : "MNIST", + "imdb.html" : "IMDB", + "io_csv.html" : "CSV reading and writing", + "io_hdf5.html" : "HDF5 files reading and writing", + "io_image.html" : "Images reading and writing", + "io_npy.html" : "Numpy files reading and writing", + + "autograd_common.html" : "Data structure", + "gates_basic.html" : "Basic operations", + "gates_blas.html" : "Linear algebra operations", + "gates_hadamard.html" : "Hadamard product (elementwise matrix multiply)", + "gates_reduce.html" : "Reduction operations", + "gates_shapeshifting_concat_split.html" : "Concatenation, stacking, splitting, chunking operations", + "gates_shapeshifting_views.html" : "Linear algebra operations", + + "nnp_activation.html" : "Activations", + "nnp_convolution.html" : "Convolution 2D", + "nnp_conv2D_cudnn.html" : "Convolution 2D - CuDNN", + "nnp_embedding.html" : "Embeddings", + "nnp_gru.html" : "Gated Recurrent Unit (GRU)", + "nnp_linear.html" : "Linear / Dense layer", + "nnp_maxpooling.html" : "Maxpooling", + "nnp_numerical_gradient.html" : "Numerical gradient", + "nnp_sigmoid_cross_entropy.html" : "Sigmoid Cross-Entropy loss", + "nnp_softmax_cross_entropy.html" : "Softmax Cross-Entropy loss", +}.toTable + +proc wrap(name: string): string = + const tmpl = """
  • $#
  • """ + if name in nameMap: + result = tmpl % [name, nameMap[name]] + else: + result = tmpl % [name, name] + +proc genNimdocCfg*(path: string) = + ## This proc generates the `nimdoc.cfg`, which sits at the root of the + ## arraymancer repository. We generate it so that we can combine the + ## front page template derived from flyx's NimYaml: https://github.com/flyx/NimYAML + ## with the standard Nim document generation. We generate the fields for + ## the header links from the actual files found in each diretory. + ## + ## NOTE: manual intervention is required for each directory that is added + ## and should show up as its own tab in the header. Essentially look at the + ## `$` spans in the `docFileTmpl` above to see what to do. + let files = getFiles(path) + let catMap = { "tensor" : 1, + "nn" : 2, + "nn_dsl" : 2, + "la" : 3, + "stats" : 3, + "ml" : 3, + "datasets" : 4, + "io" : 4, + "ag" : 5 , + "nn_primitives" : 6 }.toTable + var spanMap = newSeq[seq[string]](6) + + for file in files: + let baseName = file.extractFilename() + var outfile = baseName.replace(".nim", ".html") + let subDir = file.parentDir.extractFilename + if subDir in catMap: + echo subDir + let idx = catMap[subDir] - 1 + spanMap[idx].add outfile + else: + echo "!! subDir ", subDir + + var spans = newSeq[string](6) + for idx in 0 ..< spans.len: + spans[idx] = spanMap[idx].sorted.mapIt(wrap(it)).join("\n") + # fill the HTML generation template from the filenames + echo spans + let htmlTmpl = headerTmpl % [ spans[0], spans[1], spans[2], + spans[3], spans[4], spans[5] ] + + # first "header" + var fdata = "" + fdata.add("# Arraymancer documentation generation\n\n") + fdata.add(&"git.url = \"{gitUrl}\"\n\n") + fdata.add(&"doc.item.seesrc = \"\"\"{docItemSeeSrc}\"\"\"\n\n") + # finally write the HTML document template + fdata.add(&"doc.file = \"\"\"{docFileTmpl}{htmlTmpl}\"\"\"\n") + + # now build the content for the spans + writeFile(getProjectPath() & $DirSep & "nimdoc.cfg", fdata) From 56ae412e754b10439b00cc2d1f8fce29098bff2d Mon Sep 17 00:00:00 2001 From: Vindaar Date: Thu, 2 Apr 2020 18:32:52 +0200 Subject: [PATCH 09/20] add updated nimdoc.cfg --- nimdoc.cfg | 278 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 165 insertions(+), 113 deletions(-) diff --git a/nimdoc.cfg b/nimdoc.cfg index 740838201..c49092d1e 100644 --- a/nimdoc.cfg +++ b/nimdoc.cfg @@ -1,5 +1,4 @@ -## Arraymancer documentation generation -# Inspiration from flyx's NimYaml: https://github.com/flyx/NimYAML +# Arraymancer documentation generation git.url = "https://github.com/mratsim/arraymancer" @@ -9,26 +8,103 @@ class="link-seesrc" target="_blank">Source Edit """ -# TODO: industrialize similar to Nim website: https://github.com/nim-lang/Nim/blob/e758b9408e8fe935117f7f793164f1c9b74cec06/tools/nimweb.nim#L45 -# And: https://github.com/nim-lang/Nim/blob/d3f966922ef4ddd05c137f82e5b2329b3d5dc485/web/website.ini#L31 - -# TODO: move the technical reference to the end (need some CSS so that elements are properly placed) - -doc.file = """ - +doc.file = """ + + - - Arraymancer - $title + + + + + + + + + + + + + +$title + + + + + + - + + +Arraymancer - $title + + + + + + + Fork me on GitHub + + +
    +
    +

    $title

    + $content +
    + +
    +
    +p
    +$analytics
    Arraymancer @@ -37,115 +113,104 @@ doc.file = """ Core tensor API Neural network API Linear algebra, stats, ML IO & Datasets Autograd Neuralnet primitives @@ -179,19 +244,6 @@ doc.file = """
    -
    -
    -

    $title

    - $content -
    - -
    -
    -
    """ From 7412f5ee4066dcb12043a79009af70934c79568a Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 4 Apr 2020 10:22:19 +0200 Subject: [PATCH 10/20] remove stray `p` from html template We don't want every page to have a little 'p' in the bottom left corner, do we? --- docs/generateNimdocCfg.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/generateNimdocCfg.nim b/docs/generateNimdocCfg.nim index 8a46d94ba..12b7c8058 100644 --- a/docs/generateNimdocCfg.nim +++ b/docs/generateNimdocCfg.nim @@ -112,7 +112,7 @@ function main() { -p + $analytics """ From dabc2690e739b352e6ca2ee141bfac491a2cf38f Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 4 Apr 2020 10:23:03 +0200 Subject: [PATCH 11/20] compare and emit names w/o `.html` --- docs/generateNimdocCfg.nim | 101 +++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 50 deletions(-) diff --git a/docs/generateNimdocCfg.nim b/docs/generateNimdocCfg.nim index 12b7c8058..119fcaac0 100644 --- a/docs/generateNimdocCfg.nim +++ b/docs/generateNimdocCfg.nim @@ -194,56 +194,57 @@ const headerTmpl* = """ """ let nameMap = { - "dsl_core.html" : "Neural network: Declaration", - "relu.html" : "Activation: Relu (Rectified linear Unit)", - "sigmoid.html" : "Activation: Sigmoid", - "tanh.html" : "Activation: Tanh", - "conv2D.html" : "Layers: Convolution 2D", - "embedding.html" : "Layers: Embedding", - "gru.html" : "Layers: GRU (Gated Linear Unit)", - "linear.html" : "Layers: Linear/Dense", - "maxpool2D.html" : "Layers: Maxpool 2D", - "cross_entropy_losses.html" : "Loss: Cross-Entropy losses", - "mean_square_error_loss.html" : "Loss: Mean Square Error", - - "reshape_flatten.html" : "Reshape & Flatten", - - "decomposition.html" : "Eigenvalue decomposition", - "decomposition_rand.html" : "Randomized Truncated SVD", - "least_squares.html" : "Least squares solver", - "linear_systems.html" : "Linear systems solver", - "special_matrices.html" : "Special linear algebra matrices", - "stats.html" : "Statistics", - "pca.html" : "Principal Component Analysis (PCA)", - "accuracy_score.html" : "Accuracy score", - "common_error_functions.html" : "Common errors, MAE and MSE (L1, L2 loss)", - "kmeans.html" : "K-Means", - - "mnist.html" : "MNIST", - "imdb.html" : "IMDB", - "io_csv.html" : "CSV reading and writing", - "io_hdf5.html" : "HDF5 files reading and writing", - "io_image.html" : "Images reading and writing", - "io_npy.html" : "Numpy files reading and writing", - - "autograd_common.html" : "Data structure", - "gates_basic.html" : "Basic operations", - "gates_blas.html" : "Linear algebra operations", - "gates_hadamard.html" : "Hadamard product (elementwise matrix multiply)", - "gates_reduce.html" : "Reduction operations", - "gates_shapeshifting_concat_split.html" : "Concatenation, stacking, splitting, chunking operations", - "gates_shapeshifting_views.html" : "Linear algebra operations", - - "nnp_activation.html" : "Activations", - "nnp_convolution.html" : "Convolution 2D", - "nnp_conv2D_cudnn.html" : "Convolution 2D - CuDNN", - "nnp_embedding.html" : "Embeddings", - "nnp_gru.html" : "Gated Recurrent Unit (GRU)", - "nnp_linear.html" : "Linear / Dense layer", - "nnp_maxpooling.html" : "Maxpooling", - "nnp_numerical_gradient.html" : "Numerical gradient", - "nnp_sigmoid_cross_entropy.html" : "Sigmoid Cross-Entropy loss", - "nnp_softmax_cross_entropy.html" : "Softmax Cross-Entropy loss", + "dsl_core" : "Neural network: Declaration", + "relu" : "Activation: Relu (Rectified linear Unit)", + "sigmoid" : "Activation: Sigmoid", + "tanh" 
: "Activation: Tanh", + "conv2D" : "Layers: Convolution 2D", + "embedding" : "Layers: Embedding", + "gru" : "Layers: GRU (Gated Linear Unit)", + "linear" : "Layers: Linear/Dense", + "maxpool2D" : "Layers: Maxpool 2D", + "cross_entropy_losses" : "Loss: Cross-Entropy losses", + "mean_square_error_loss" : "Loss: Mean Square Error", + + "reshape_flatten" : "Reshape & Flatten", + + "decomposition" : "Eigenvalue decomposition", + "decomposition_rand" : "Randomized Truncated SVD", + "least_squares" : "Least squares solver", + "linear_systems" : "Linear systems solver", + "special_matrices" : "Special linear algebra matrices", + "stats" : "Statistics", + "pca" : "Principal Component Analysis (PCA)", + "accuracy_score" : "Accuracy score", + "common_error_functions" : "Common errors, MAE and MSE (L1, L2 loss)", + "kmeans" : "K-Means", + + "mnist" : "MNIST", + "imdb" : "IMDB", + "io_csv" : "CSV reading and writing", + "io_hdf5" : "HDF5 files reading and writing", + "io_image" : "Images reading and writing", + "io_npy" : "Numpy files reading and writing", + + "autograd_common" : "Data structure", + "gates_basic" : "Basic operations", + "gates_blas" : "Linear algebra operations", + "gates_hadamard" : "Hadamard product (elementwise matrix multiply)", + "gates_reduce" : "Reduction operations", + "gates_shapeshifting_concat_split" : "Concatenation, stacking, splitting, chunking operations", + "gates_shapeshifting_views" : "Linear algebra operations", + + "nnp_activation" : "Activations", + "nnp_convolution" : "Convolution 2D", + "nnp_conv2d_cudnn" : "Convolution 2D - CuDNN", + "nnp_embedding" : "Embeddings", + "nnp_gru" : "Gated Recurrent Unit (GRU)", + "nnp_linear" : "Linear / Dense layer", + "nnp_maxpooling" : "Maxpooling", + "nnp_numerical_gradient" : "Numerical gradient", + "nnp_sigmoid_cross_entropy" : "Sigmoid Cross-Entropy loss", + "nnp_softmax_cross_entropy" : "Softmax Cross-Entropy loss", + "nnp_softmax" : "Softmax" }.toTable proc wrap(name: string): string = From b6f52041c40f6da9dace5ce2efdd6f91e74b2780 Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 4 Apr 2020 10:23:40 +0200 Subject: [PATCH 12/20] add `Other docs` category and fix dir names of ag, la --- docs/generateNimdocCfg.nim | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/docs/generateNimdocCfg.nim b/docs/generateNimdocCfg.nim index 119fcaac0..eadb2b5dd 100644 --- a/docs/generateNimdocCfg.nim +++ b/docs/generateNimdocCfg.nim @@ -158,6 +158,12 @@ const headerTmpl* = """ $6 + + Other docs +
      + $7 +
    +
    @@ -268,18 +274,22 @@ proc genNimdocCfg*(path: string) = let catMap = { "tensor" : 1, "nn" : 2, "nn_dsl" : 2, - "la" : 3, + "linear_algebra" : 3, "stats" : 3, "ml" : 3, "datasets" : 4, "io" : 4, - "ag" : 5 , - "nn_primitives" : 6 }.toTable - var spanMap = newSeq[seq[string]](6) + "autograd" : 5 , + "nn_primitives" : 6, + "nlp" : 7, + "math_ops_fusion" : 7, + "laser" : 7, + "private" : 7}.toTable + var spanMap = newSeq[seq[string]](7) for file in files: let baseName = file.extractFilename() - var outfile = baseName.replace(".nim", ".html") + var outfile = baseName.replace(".nim", "") let subDir = file.parentDir.extractFilename if subDir in catMap: echo subDir @@ -288,13 +298,15 @@ proc genNimdocCfg*(path: string) = else: echo "!! subDir ", subDir - var spans = newSeq[string](6) + var spans = newSeq[string](7) for idx in 0 ..< spans.len: spans[idx] = spanMap[idx].sorted.mapIt(wrap(it)).join("\n") # fill the HTML generation template from the filenames echo spans + let htmlTmpl = headerTmpl % [ spans[0], spans[1], spans[2], - spans[3], spans[4], spans[5] ] + spans[3], spans[4], spans[5], + spans[6]] # first "header" var fdata = "" From 0ed3b5de1cde8977d35aa9ebb320dd86e01510db Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 4 Apr 2020 10:24:14 +0200 Subject: [PATCH 13/20] [gemm legacy] remove not used import, fix used import path --- src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim | 4 +--- src/tensor/fallback/legacy/blas_l3_gemm_micro_kernel.nim | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim b/src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim index 1bb805eb0..572e0fe75 100644 --- a/src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim +++ b/src/tensor/fallback/legacy/blas_l3_gemm_macro_kernel.nim @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import ../../backend/openmp - proc gemm_macro_kernel[T](mc, nc, kc: int, alpha: T, beta: T, @@ -58,4 +56,4 @@ proc gemm_macro_kernel[T](mc, nc, kc: int, buffer_C, 1, MR, C, i*MR*incRowC+j*NR*incColC + offC, - incRowC, incColC) \ No newline at end of file + incRowC, incColC) diff --git a/src/tensor/fallback/legacy/blas_l3_gemm_micro_kernel.nim b/src/tensor/fallback/legacy/blas_l3_gemm_micro_kernel.nim index 2932c9e20..b21fbd20a 100644 --- a/src/tensor/fallback/legacy/blas_l3_gemm_micro_kernel.nim +++ b/src/tensor/fallback/legacy/blas_l3_gemm_micro_kernel.nim @@ -13,7 +13,7 @@ # limitations under the License. 
import macros, - ../backend/memory_optimization_hints + ../../backend/memory_optimization_hints macro unroll_ukernel[MRNR, T](AB: array[MRNR, T], a: ptr UncheckedArray[T], offA: int, From aa8bcce8af454b2dd342963c81757cf89b8f68db Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 4 Apr 2020 13:32:34 +0200 Subject: [PATCH 14/20] merge nimdoc creationg into `docs', add `templates` file --- arraymancer.nimble | 2 +- docs/docs.nim | 150 +++++++++++++++++- ...erateNimdocCfg.nim => nimDocTemplates.nim} | 126 --------------- 3 files changed, 149 insertions(+), 129 deletions(-) rename docs/{generateNimdocCfg.nim => nimDocTemplates.nim} (66%) diff --git a/arraymancer.nimble b/arraymancer.nimble index ed4dd6041..e602a2552 100644 --- a/arraymancer.nimble +++ b/arraymancer.nimble @@ -230,7 +230,7 @@ task test_release, "Run all tests - Release mode": test "tests_cpu", " -d:release" -import docs / [docs, generateNimdocCfg] +import docs / docs task gen_docs, "Generate Arraymancer documentation": # generate nimdoc.cfg file so we can generate the correct header for the # index.html page without having to mess with the HTML manually. diff --git a/docs/docs.nim b/docs/docs.nim index e36e2d86e..5a9351ed6 100644 --- a/docs/docs.nim +++ b/docs/docs.nim @@ -1,6 +1,6 @@ -import macros, strformat, strutils, sequtils, sets +import macros, strformat, strutils, sequtils, sets, tables, algorithm -from os import parentDir, getCurrentCompilerExe, DirSep, extractFilename, `/` +from os import parentDir, getCurrentCompilerExe, DirSep, extractFilename, `/`, setCurrentDir when defined(nimdoc): from os import getCurrentDir, paramCount, paramStr @@ -68,6 +68,8 @@ proc getFiles*(path: string): seq[string] = for dir in listDirs(path): result.add getFiles(dir) +import nimDocTemplates + proc buildDocs*(path: string, docPath: string, baseDir = getProjectPath() & $DirSep, defines: openArray[string] = @[]) = ## Generate docs for all nim files in `path` and output all HTML files to the @@ -152,3 +154,147 @@ proc buildDocs*(path: string, docPath: string, baseDir = getProjectPath() & $Dir # echo "Processed files: ", fileSet if duplSet.card > 0: echo "WARNING: Duplicate filenames detected: ", duplSet + + +let nameMap = { + "dsl_core" : "Neural network: Declaration", + "relu" : "Activation: Relu (Rectified linear Unit)", + "sigmoid" : "Activation: Sigmoid", + "tanh" : "Activation: Tanh", + "conv2D" : "Layers: Convolution 2D", + "embedding" : "Layers: Embedding", + "gru" : "Layers: GRU (Gated Linear Unit)", + "linear" : "Layers: Linear/Dense", + "maxpool2D" : "Layers: Maxpool 2D", + "cross_entropy_losses" : "Loss: Cross-Entropy losses", + "mean_square_error_loss" : "Loss: Mean Square Error", + "softmax" : "Softmax", + "optimizers" : "Optimizers", + "init" : "Layers: Initializations" + + "reshape_flatten" : "Reshape & Flatten", + + "decomposition" : "Eigenvalue decomposition", + "decomposition_rand" : "Randomized Truncated SVD", + "least_squares" : "Least squares solver", + "linear_systems" : "Linear systems solver", + "special_matrices" : "Special linear algebra matrices", + "stats" : "Statistics", + "pca" : "Principal Component Analysis (PCA)", + "accuracy_score" : "Accuracy score", + "common_error_functions" : "Common errors, MAE and MSE (L1, L2 loss)", + "kmeans" : "K-Means", + + "mnist" : "MNIST", + "imdb" : "IMDB", + "io_csv" : "CSV reading and writing", + "io_hdf5" : "HDF5 files reading and writing", + "io_image" : "Images reading and writing", + "io_npy" : "Numpy files reading and writing", + + "autograd_common" : "Data structure", + 
"gates_basic" : "Basic operations", + "gates_blas" : "Linear algebra operations", + "gates_hadamard" : "Hadamard product (elementwise matrix multiply)", + "gates_reduce" : "Reduction operations", + "gates_shapeshifting_concat_split" : "Concatenation, stacking, splitting, chunking operations", + "gates_shapeshifting_views" : "Linear algebra operations", + + "nnp_activation" : "Activations", + "nnp_convolution" : "Convolution 2D", + "nnp_conv2d_cudnn" : "Convolution 2D - CuDNN", + "nnp_embedding" : "Embeddings", + "nnp_gru" : "Gated Recurrent Unit (GRU)", + "nnp_linear" : "Linear / Dense layer", + "nnp_maxpooling" : "Maxpooling", + "nnp_numerical_gradient" : "Numerical gradient", + "nnp_sigmoid_cross_entropy" : "Sigmoid Cross-Entropy loss", + "nnp_softmax_cross_entropy" : "Softmax Cross-Entropy loss", + "nnp_softmax" : "Softmax" +}.toTable + +proc wrap(name: string): string = + const tmpl = """
  • $#
  • """
+  if name in nameMap:
+    result = tmpl % [name & ".html", nameMap[name]]
+  else:
+    result = tmpl % [name & ".html", name]
+
+proc getHeaderMap(path: string): seq[seq[string]] =
+  ## returns a nested seq where each element is a `seq[string]` containing
+  ## all elements to be added to the header at the index. The index
+  ## corresponds to the `$N` of the `nimDocTemplates.headerTmpl` field.
+  const excludeFiles = [ "nn", # only imports and exports `NN` files
+                         "nn_dsl", # only imports and exports `NN DSL` files
+                         "ml", # only imports and exports `ML` files
+                         "io", # only imports and exports `io` files
+                         "autograd", # only imports and exports `autograd` files
+                         "blis" # doesn't import or export anything
+  ]
+  let ExcludeFileSet = toSet(excludeFiles)
+  # map of the different header categories
+  let catMap = { "tensor" : 1,
+                 "nn" : 2,
+                 "nn_dsl" : 2,
+                 "linear_algebra" : 3,
+                 "stats" : 3,
+                 "ml" : 3,
+                 "datasets" : 4,
+                 "io" : 4,
+                 "autograd" : 5 ,
+                 "nn_primitives" : 6,
+                 "nlp" : 7,
+                 "math_ops_fusion" : 7,
+                 "laser" : 7,
+                 "private" : 7}.toTable
+
+  # `indexOverride` is used to override the index of the header the file
+  # is added to. Some files may be part of e.g. `tensor` but shouldn't be
+  # listed there, since they aren't that important.
+  # NOTE: the elements here are ``filenames`` and ``not`` directories!
+  let indexOverride = { "global_config" : 7 }.toTable
+  let files = getFiles(path)
+
+  result = newSeq[seq[string]](7)
+  for file in files:
+    let baseName = file.extractFilename()
+    let outfile = baseName.replace(".nim", "")
+    if outfile in ExcludeFileSet: continue
+    let subDir = file.removePrefix(path).split('/')[0]
+    if subDir in catMap:
+      var idx: int
+      if outfile notin indexOverride:
+        idx = catMap[subDir] - 1
+      else:
+        idx = indexOverride[outfile] - 1
+      result[idx].add outfile
+
+proc genNimdocCfg*(path: string) =
+  ## This proc generates the `nimdoc.cfg`, which sits at the root of the
+  ## arraymancer repository. We generate it so that we can combine the
+  ## front page template derived from flyx's NimYaml: https://github.com/flyx/NimYAML
+  ## with the standard Nim document generation. We generate the fields for
+  ## the header links from the actual files found in each directory.
+  ##
+  ## NOTE: manual intervention is required for each directory that is added
+  ## and should show up as its own tab in the header. Essentially look at the
+  ## `$` spans in the `docFileTmpl` above to see what to do.
+ let headerMap = getHeaderMap(path) + # create the strings based on the header map for each span + var spans = newSeq[string](7) + for idx in 0 ..< spans.len: + spans[idx] = headerMap[idx].sorted.mapIt(wrap(it)).join("\n") + # fill the HTML generation template from the filenames + let htmlTmpl = headerTmpl % [ spans[0], spans[1], spans[2], + spans[3], spans[4], spans[5], + spans[6]] + # first "header" + var fdata = "" + fdata.add("# Arraymancer documentation generation\n\n") + fdata.add(&"git.url = \"{gitUrl}\"\n\n") + fdata.add(&"doc.item.seesrc = \"\"\"{docItemSeeSrc}\"\"\"\n\n") + # finally write the HTML document template + fdata.add(&"doc.file = \"\"\"{docFileTmpl}{htmlTmpl}\"\"\"\n") + + # now build the content for the spans + writeFile(getProjectPath() & $DirSep & "nimdoc.cfg", fdata) diff --git a/docs/generateNimdocCfg.nim b/docs/nimDocTemplates.nim similarity index 66% rename from docs/generateNimdocCfg.nim rename to docs/nimDocTemplates.nim index eadb2b5dd..e3e463cac 100644 --- a/docs/generateNimdocCfg.nim +++ b/docs/nimDocTemplates.nim @@ -1,9 +1,3 @@ -import strutils, docs, tables, sequtils, macros, strformat, algorithm -from os import parentDir, getCurrentCompilerExe, DirSep, extractFilename, `/` - -## Arraymancer documentation generation - - const gitUrl* = "https://github.com/mratsim/arraymancer" const docItemSeeSrc* = """   """ - -let nameMap = { - "dsl_core" : "Neural network: Declaration", - "relu" : "Activation: Relu (Rectified linear Unit)", - "sigmoid" : "Activation: Sigmoid", - "tanh" : "Activation: Tanh", - "conv2D" : "Layers: Convolution 2D", - "embedding" : "Layers: Embedding", - "gru" : "Layers: GRU (Gated Linear Unit)", - "linear" : "Layers: Linear/Dense", - "maxpool2D" : "Layers: Maxpool 2D", - "cross_entropy_losses" : "Loss: Cross-Entropy losses", - "mean_square_error_loss" : "Loss: Mean Square Error", - - "reshape_flatten" : "Reshape & Flatten", - - "decomposition" : "Eigenvalue decomposition", - "decomposition_rand" : "Randomized Truncated SVD", - "least_squares" : "Least squares solver", - "linear_systems" : "Linear systems solver", - "special_matrices" : "Special linear algebra matrices", - "stats" : "Statistics", - "pca" : "Principal Component Analysis (PCA)", - "accuracy_score" : "Accuracy score", - "common_error_functions" : "Common errors, MAE and MSE (L1, L2 loss)", - "kmeans" : "K-Means", - - "mnist" : "MNIST", - "imdb" : "IMDB", - "io_csv" : "CSV reading and writing", - "io_hdf5" : "HDF5 files reading and writing", - "io_image" : "Images reading and writing", - "io_npy" : "Numpy files reading and writing", - - "autograd_common" : "Data structure", - "gates_basic" : "Basic operations", - "gates_blas" : "Linear algebra operations", - "gates_hadamard" : "Hadamard product (elementwise matrix multiply)", - "gates_reduce" : "Reduction operations", - "gates_shapeshifting_concat_split" : "Concatenation, stacking, splitting, chunking operations", - "gates_shapeshifting_views" : "Linear algebra operations", - - "nnp_activation" : "Activations", - "nnp_convolution" : "Convolution 2D", - "nnp_conv2d_cudnn" : "Convolution 2D - CuDNN", - "nnp_embedding" : "Embeddings", - "nnp_gru" : "Gated Recurrent Unit (GRU)", - "nnp_linear" : "Linear / Dense layer", - "nnp_maxpooling" : "Maxpooling", - "nnp_numerical_gradient" : "Numerical gradient", - "nnp_sigmoid_cross_entropy" : "Sigmoid Cross-Entropy loss", - "nnp_softmax_cross_entropy" : "Softmax Cross-Entropy loss", - "nnp_softmax" : "Softmax" -}.toTable - -proc wrap(name: string): string = - const tmpl = """
  • $#
  • """ - if name in nameMap: - result = tmpl % [name, nameMap[name]] - else: - result = tmpl % [name, name] - -proc genNimdocCfg*(path: string) = - ## This proc generates the `nimdoc.cfg`, which sits at the root of the - ## arraymancer repository. We generate it so that we can combine the - ## front page template derived from flyx's NimYaml: https://github.com/flyx/NimYAML - ## with the standard Nim document generation. We generate the fields for - ## the header links from the actual files found in each diretory. - ## - ## NOTE: manual intervention is required for each directory that is added - ## and should show up as its own tab in the header. Essentially look at the - ## `$` spans in the `docFileTmpl` above to see what to do. - let files = getFiles(path) - let catMap = { "tensor" : 1, - "nn" : 2, - "nn_dsl" : 2, - "linear_algebra" : 3, - "stats" : 3, - "ml" : 3, - "datasets" : 4, - "io" : 4, - "autograd" : 5 , - "nn_primitives" : 6, - "nlp" : 7, - "math_ops_fusion" : 7, - "laser" : 7, - "private" : 7}.toTable - var spanMap = newSeq[seq[string]](7) - - for file in files: - let baseName = file.extractFilename() - var outfile = baseName.replace(".nim", "") - let subDir = file.parentDir.extractFilename - if subDir in catMap: - echo subDir - let idx = catMap[subDir] - 1 - spanMap[idx].add outfile - else: - echo "!! subDir ", subDir - - var spans = newSeq[string](7) - for idx in 0 ..< spans.len: - spans[idx] = spanMap[idx].sorted.mapIt(wrap(it)).join("\n") - # fill the HTML generation template from the filenames - echo spans - - let htmlTmpl = headerTmpl % [ spans[0], spans[1], spans[2], - spans[3], spans[4], spans[5], - spans[6]] - - # first "header" - var fdata = "" - fdata.add("# Arraymancer documentation generation\n\n") - fdata.add(&"git.url = \"{gitUrl}\"\n\n") - fdata.add(&"doc.item.seesrc = \"\"\"{docItemSeeSrc}\"\"\"\n\n") - # finally write the HTML document template - fdata.add(&"doc.file = \"\"\"{docFileTmpl}{htmlTmpl}\"\"\"\n") - - # now build the content for the spans - writeFile(getProjectPath() & $DirSep & "nimdoc.cfg", fdata) From 4b3f22fee93084aef01372d73d03bc42ce906302 Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 4 Apr 2020 13:33:16 +0200 Subject: [PATCH 15/20] correctly set path to `source` and `edit` in docs --- docs/docs.nim | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/docs/docs.nim b/docs/docs.nim index 5a9351ed6..e2e0f5b1b 100644 --- a/docs/docs.nim +++ b/docs/docs.nim @@ -71,6 +71,7 @@ proc getFiles*(path: string): seq[string] = import nimDocTemplates proc buildDocs*(path: string, docPath: string, baseDir = getProjectPath() & $DirSep, + masterBranch = "master", defines: openArray[string] = @[]) = ## Generate docs for all nim files in `path` and output all HTML files to the ## `docPath` in a flattened form (subdirectories are removed). @@ -80,6 +81,9 @@ proc buildDocs*(path: string, docPath: string, baseDir = getProjectPath() & $Dir ## `baseDir` is the project path by default and `files` and `path` are relative ## to that directory. Set to "" if using absolute paths. ## + ## `masterBranch` is the name of the default branch to which the docs should link + ## when clicking the `Source` button below a procedure etc. + ## ## `defines` is a list of `-d:xxx` define flags (the `xxx` part) that should be passed ## to `nim doc` so that `getHeader()` is invoked correctly. 
## @@ -134,13 +138,18 @@ proc buildDocs*(path: string, docPath: string, baseDir = getProjectPath() & $Dir fileSet.incl outfile outfile = docPath / outfile echo "Processing: ", outfile, " [", idx, "/", files.len, "]" - # now call `nim doc` on each file - echo execAction(&"{nim} doc {defStr} -o:{outfile} --index:on {file}") + # NOTE: Changing the current working directory to the project path is required in order for + # `git.commit:` to work! Otherwise we sit in `docs` and for some reason the relative path + # will eat one piece of the resulting `source` links, thereby removing the actual branch, + # and we end up with a broken link! + echo execAction(&"cd {getProjectPath()} && {nim} doc {defStr} --git.commit:{masterBranch} -o:{outfile} --index:on {file}") inc idx ## now build the index echo execAction(&"{nim} buildIndex -o:{docPath}/theindex.html {docPath}") when declared(getNimRootDir): #[ + NOTE: running it locally doesn't work anymore on modern Chromium browsers, + because they block "access from origin 'null' due to CORS policy". this enables doc search, works at least locally with: cd {docPath} && python -m SimpleHTTPServer 9009 ]#
From 386548915362022809bb4542abbe1bb938150a41 Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 4 Apr 2020 13:38:06 +0200 Subject: [PATCH 16/20] add scrolling for the header Currently setting the max height to 800 px. I'm not a CSS wizard, so I don't know how to make it take the device size into account. --- docs/nav.css | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/docs/nav.css b/docs/nav.css index 7efa28b2c..0a35153b0 100644 --- a/docs/nav.css +++ b/docs/nav.css @@ -77,6 +77,12 @@ header span ul.monospace a { font-family: "Source Code Pro", Menlo, "Courier New", Courier, monospace; } +header span ul span ul { + max-height: 800px; /* adjust as needed */ + overflow: auto; /* to get a scrollbar */ +} + + header a:link, header a:visited { background: inherit;
From 1dca60c6a7ca25ea6c86528732574909ac8d1ef0 Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 4 Apr 2020 13:38:40 +0200 Subject: [PATCH 17/20] fix naming for layer initializations --- docs/docs.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/docs.nim b/docs/docs.nim index e2e0f5b1b..5bbbdedeb 100644 --- a/docs/docs.nim +++ b/docs/docs.nim @@ -179,7 +179,7 @@ let nameMap = { "mean_square_error_loss" : "Loss: Mean Square Error", "softmax" : "Softmax", "optimizers" : "Optimizers", - "init" : "Layers: Initializations" + "init" : "Layers: Initializations", "reshape_flatten" : "Reshape & Flatten",
From 147dd0d6d40ef5348b58eac8efeb6196a413686d Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sat, 4 Apr 2020 13:38:59 +0200 Subject: [PATCH 18/20] update nimdoc.cfg --- nimdoc.cfg | 218 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 160 insertions(+), 58 deletions(-)
diff --git a/nimdoc.cfg b/nimdoc.cfg index c49092d1e..5d9d3f5f6 100644 --- a/nimdoc.cfg +++ b/nimdoc.cfg @@ -103,7 +103,7 @@ function main() { -p + $analytics
    Arraymancer
@@ -113,95 +113,157 @@ $analytics Core tensor API Neural network API Linear algebra, stats, ML IO & Datasets Autograd Neuralnet primitives + + + Other docs +
From 6af866df7439ab11185f2ee8d61b330e8a172d57 Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sun, 5 Apr 2020 23:32:18 +0200 Subject: [PATCH 19/20] move doc generation to src/docs If the files are located outside the src dir, nimble will fail to install the package, since for some reason it does something with its generated nim script file from the ~/.nimble/pkg directory, from which it obviously cannot find docs / docs, since that isn't part of the source. --- arraymancer.nimble | 2 +- {docs => src/docs}/docs.nim | 0 {docs => src/docs}/nimDocTemplates.nim | 0 3 files changed, 1 insertion(+), 1 deletion(-) rename {docs => src/docs}/docs.nim (100%) rename {docs => src/docs}/nimDocTemplates.nim (100%)
diff --git a/arraymancer.nimble b/arraymancer.nimble index e602a2552..793433d00 100644 --- a/arraymancer.nimble +++ b/arraymancer.nimble @@ -230,7 +230,7 @@ task test_release, "Run all tests - Release mode": test "tests_cpu", " -d:release" -import docs / docs +import src / docs / docs task gen_docs, "Generate Arraymancer documentation": # generate nimdoc.cfg file so we can generate the correct header for the # index.html page without having to mess with the HTML manually.
diff --git a/docs/docs.nim b/src/docs/docs.nim similarity index 100% rename from docs/docs.nim rename to src/docs/docs.nim
diff --git a/docs/nimDocTemplates.nim b/src/docs/nimDocTemplates.nim similarity index 100% rename from docs/nimDocTemplates.nim rename to src/docs/nimDocTemplates.nim
From 9e748ffd6b6fedd34e881f08e6e42522bf93a30d Mon Sep 17 00:00:00 2001 From: Vindaar Date: Sun, 5 Apr 2020 23:58:20 +0200 Subject: [PATCH 20/20] move docs back to docs/, hack around actual problem Sorry to solve it this way. I feel like this might be a nimble bug. Even if the files are in the actual source directory of the package, it still says it can't import them. --- arraymancer.nimble | 39 ++++++++++++++++---------- {src/docs => docs}/docs.nim | 0 {src/docs => docs}/nimDocTemplates.nim | 0 3 files changed, 24 insertions(+), 15 deletions(-) rename {src/docs => docs}/docs.nim (100%) rename {src/docs => docs}/nimDocTemplates.nim (100%)
diff --git a/arraymancer.nimble b/arraymancer.nimble index 793433d00..b7ef792d1 100644 --- a/arraymancer.nimble +++ b/arraymancer.nimble @@ -230,18 +230,27 @@ task test_release, "Run all tests - Release mode": test "tests_cpu", " -d:release" -import src / docs / docs -task gen_docs, "Generate Arraymancer documentation": - # generate nimdoc.cfg file so we can generate the correct header for the - # index.html page without having to mess with the HTML manually. - genNimdocCfg("src/") - # build the actual docs and the index - buildDocs("src/", "docs/build") - # Copy our stylesheets - cpFile("docs/docutils.css", "docs/build/docutils.css") - cpFile("docs/nav.css", "docs/build/nav.css") - # Process the rst - for filePath in listFiles("docs/"): - if filePath[^4..^1] == ".rst": - let modName = filePath[5..^5] - exec r"nim rst2html -o:docs/build/" & modName & ".html " & filePath +template canImport(x: untyped): untyped = + compiles: + import x + +when canImport(docs / docs): + # can define the `gen_docs` task (docs already imported now) + # this is to hack around weird nimble + nimscript behavior.
+ # when overwriting an install, nimble will try to parse the generated + # nimscript file and for some reason it then won't be able to import + # the module (even if it's put into `src/`). + task gen_docs, "Generate Arraymancer documentation": + # generate nimdoc.cfg file so we can generate the correct header for the + # index.html page without having to mess with the HTML manually. + genNimdocCfg("src/") + # build the actual docs and the index + buildDocs("src/", "docs/build") + # Copy our stylesheets + cpFile("docs/docutils.css", "docs/build/docutils.css") + cpFile("docs/nav.css", "docs/build/nav.css") + # Process the rst + for filePath in listFiles("docs/"): + if filePath[^4..^1] == ".rst": + let modName = filePath[5..^5] + exec r"nim rst2html -o:docs/build/" & modName & ".html " & filePath
diff --git a/src/docs/docs.nim b/docs/docs.nim similarity index 100% rename from src/docs/docs.nim rename to docs/docs.nim
diff --git a/src/docs/nimDocTemplates.nim b/docs/nimDocTemplates.nim similarity index 100% rename from src/docs/nimDocTemplates.nim rename to docs/nimDocTemplates.nim
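
For reference, the `compiles:` trick from PATCH 20/20 can be exercised on its own. The sketch below is hypothetical: the file name and the `echo` placeholders are not part of the patches, and only the `canImport` template itself mirrors the nimble file above. The guard works because a failing `import` inside `compiles:` is swallowed at compile time, so nimble can still evaluate its generated copy of the package file (the situation described in the PATCH 19/20 messages) even where `docs / docs` does not resolve:

    # canimport_sketch.nims -- hypothetical standalone NimScript file, not part of the patches
    template canImport(x: untyped): untyped =
      compiles:
        import x

    when canImport(docs / docs):
      # Reached only from a repository checkout where docs/docs.nim resolves;
      # this is where the real arraymancer.nimble defines the `gen_docs` task.
      echo "docs/docs.nim found, a gen_docs task would be defined here"
    else:
      # Reached when the module cannot be imported (e.g. from nimble's
      # generated script); nothing is defined and evaluation continues.
      echo "docs/docs.nim not importable, skipping doc generation helpers"

Running this from the repository root prints the first message; running it anywhere else prints the second, which is the behaviour the final patch relies on to keep `nimble install` working while still defining `gen_docs` for developers.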